VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@ 44000

Last change on this file since 44000 was 43442, checked in by vboxsync, 12 years ago

Network/e1000: Fixed issue with format handlers registration

File size: 306.7 KB
1/* $Id: DevE1000.cpp 43442 2012-09-26 13:21:27Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2011 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28#define LOG_GROUP LOG_GROUP_DEV_E1000
29
30//#define E1kLogRel(a) LogRel(a)
31#define E1kLogRel(a)
32
33/* Options *******************************************************************/
34/*
35 * E1K_INIT_RA0 forces E1000 to set the first entry in Receive Address filter
36 * table to the MAC address obtained from CFGM. Most guests read the MAC address from
37 * EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend on it
38 * being already set (see @bugref{4657}).
39 */
40#define E1K_INIT_RA0
41/*
42 * E1K_LSC_ON_SLU causes E1000 to generate Link Status Change interrupt when
43 * the guest driver brings up the link via STATUS.LU bit. Again the only guest
44 * that requires it is Mac OS X (see @bugref{4657}).
45 */
46#define E1K_LSC_ON_SLU
47/*
48 * E1K_ITR_ENABLED reduces the number of interrupts generated by E1000 if a
50 * guest driver requests it by writing a non-zero value to the Interrupt
50 * Throttling Register (see section 13.4.18 in "8254x Family of Gigabit
51 * Ethernet Controllers Software Developer’s Manual").
52 */
53//#define E1K_ITR_ENABLED
54/*
55 * E1K_TX_DELAY aims to improve guest-host transfer rate for TCP streams by
56 * preventing packets from being sent immediately. It allows several packets
57 * to be sent in a batch, reducing the number of acknowledgments. Note that it
58 * effectively disables the R0 TX path, forcing transmission in R3.
59 */
60//#define E1K_TX_DELAY 150
61/*
62 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
63 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
64 * register. Enabling it showed no positive effects on existing guests so it
65 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
66 * Ethernet Controllers Software Developer’s Manual" for more detailed
67 * explanation.
68 */
69//#define E1K_USE_TX_TIMERS
70/*
71 * E1K_NO_TAD disables one of two timers enabled by E1K_USE_TX_TIMERS, the
72 * Transmit Absolute Delay time. This timer sets the maximum time interval
73 * during which TX interrupts can be postponed (delayed). It has no effect
74 * if E1K_USE_TX_TIMERS is not defined.
75 */
76//#define E1K_NO_TAD
77/*
78 * E1K_REL_DEBUG enables debug logging at levels 1, 2, and 3 in release builds.
79 */
80//#define E1K_REL_DEBUG
81/*
82 * E1K_INT_STATS enables collection of internal statistics used for
83 * debugging of delayed interrupts, etc.
84 */
85//#define E1K_INT_STATS
86/*
87 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
88 */
89//#define E1K_WITH_MSI
90/*
91 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
92 */
93#define E1K_WITH_TX_CS
94/*
95 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
96 * single physical memory read (or two if it wraps around the end of TX
97 * descriptor ring). It is required for proper functioning of bandwidth
98 * resource control, as it allows computing the exact sizes of packets prior
99 * to allocating their buffers (see @bugref{5582}).
100 */
101#define E1K_WITH_TXD_CACHE
102/*
103 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
104 * single physical memory read (or two if it wraps around the end of RX
105 * descriptor ring). Intel's packet driver for DOS needs this option in
106 * order to work properly (see @bugref{6217}).
107 */
108#define E1K_WITH_RXD_CACHE
109/* End of Options ************************************************************/
110
111#ifdef E1K_WITH_TXD_CACHE
112/*
113 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
114 * in the state structure. It limits the number of descriptors loaded in one
115 * batch read. For example, a Linux guest may use up to 20 descriptors per
116 * TSE packet. The largest TSE packet seen (from a Windows guest) used 45 descriptors.
117 */
118#define E1K_TXD_CACHE_SIZE 64u
119#endif /* E1K_WITH_TXD_CACHE */
120
121#ifdef E1K_WITH_RXD_CACHE
122/*
123 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
124 * in the state structure. It limits the number of descriptors loaded in one
125 * batch read. For example, an XP guest adds 15 RX descriptors at a time.
126 */
127#define E1K_RXD_CACHE_SIZE 16u
128#endif /* E1K_WITH_RXD_CACHE */
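
/*
 * A minimal sketch of the batched-fetch arithmetic the two cache options above
 * rely on: a refill reads at most one cache worth of descriptors, split into
 * two physical reads when the range wraps past the end of the ring.
 * e1kSketchSplitRxdBatch() is a hypothetical helper, for illustration only.
 */
#ifdef E1K_WITH_RXD_CACHE
static inline void e1kSketchSplitRxdBatch(uint32_t rdh, uint32_t rdt, uint32_t cDescInRing,
                                          uint32_t *pcFirstRead, uint32_t *pcSecondRead)
{
    /* Descriptors the guest has made available, from head up to (excluding) tail. */
    uint32_t cAvail = rdt >= rdh ? rdt - rdh : cDescInRing - rdh + rdt;
    uint32_t cFetch = RT_MIN(cAvail, E1K_RXD_CACHE_SIZE);
    /* First read goes up to the end of the ring, the remainder wraps to index 0. */
    *pcFirstRead  = RT_MIN(cFetch, cDescInRing - rdh);
    *pcSecondRead = cFetch - *pcFirstRead;
}
#endif /* E1K_WITH_RXD_CACHE */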
129
130#include <iprt/crc.h>
131#include <iprt/ctype.h>
132#include <iprt/net.h>
133#include <iprt/semaphore.h>
134#include <iprt/string.h>
135#include <iprt/time.h>
136#include <iprt/uuid.h>
137#include <VBox/vmm/pdmdev.h>
138#include <VBox/vmm/pdmnetifs.h>
139#include <VBox/vmm/pdmnetinline.h>
140#include <VBox/param.h>
141#include "VBoxDD.h"
142
143#include "DevEEPROM.h"
144#include "DevE1000Phy.h"
145
146/* Little helpers ************************************************************/
147#undef htons
148#undef ntohs
149#undef htonl
150#undef ntohl
151#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
152#define ntohs(x) htons(x)
153#define htonl(x) ASMByteSwapU32(x)
154#define ntohl(x) htonl(x)
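
/*
 * Quick compile-time illustration of the byte-order helpers above: the 16-bit
 * swap is its own inverse, so ntohs(htons(x)) == x for any 16-bit value.
 */
AssertCompile(htons(0x1234) == 0x3412);
AssertCompile(ntohs(htons(0xABCD)) == 0xABCD);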
155
156#ifndef DEBUG
157# ifdef E1K_REL_DEBUG
158# define DEBUG
159# define E1kLog(a) LogRel(a)
160# define E1kLog2(a) LogRel(a)
161# define E1kLog3(a) LogRel(a)
162# define E1kLogX(x, a) LogRel(a)
163//# define E1kLog3(a) do {} while (0)
164# else
165# define E1kLog(a) do {} while (0)
166# define E1kLog2(a) do {} while (0)
167# define E1kLog3(a) do {} while (0)
168# define E1kLogX(x, a) do {} while (0)
169# endif
170#else
171# define E1kLog(a) Log(a)
172# define E1kLog2(a) Log2(a)
173# define E1kLog3(a) Log3(a)
174# define E1kLogX(x, a) LogIt(LOG_INSTANCE, x, LOG_GROUP, a)
175//# define E1kLog(a) do {} while (0)
176//# define E1kLog2(a) do {} while (0)
177//# define E1kLog3(a) do {} while (0)
178#endif
179
180//#undef DEBUG
181
182#define INSTANCE(pState) pState->szInstance
183#define STATE_TO_DEVINS(pState) (((E1KSTATE *)pState)->CTX_SUFF(pDevIns))
184#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
185
186#define E1K_INC_CNT32(cnt) \
187do { \
188 if (cnt < UINT32_MAX) \
189 cnt++; \
190} while (0)
191
192#define E1K_ADD_CNT64(cntLo, cntHi, val) \
193do { \
194 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
195 uint64_t tmp = u64Cnt; \
196 u64Cnt += val; \
197 if (tmp > u64Cnt ) \
198 u64Cnt = UINT64_MAX; \
199 cntLo = (uint32_t)u64Cnt; \
200 cntHi = (uint32_t)(u64Cnt >> 32); \
201} while (0)
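
/*
 * Usage sketch for the saturating counter helpers above (illustrative only;
 * GPRC, GORCL and GORCH are register aliases defined further below):
 *
 *     E1K_INC_CNT32(GPRC);                  // one more good packet received
 *     E1K_ADD_CNT64(GORCL, GORCH, cbFrame); // good octets received, 64-bit pair
 *
 * Both stick at their maximum value instead of wrapping around.
 */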
202
203#ifdef E1K_INT_STATS
204# define E1K_INC_ISTAT_CNT(cnt) ++cnt
205#else /* E1K_INT_STATS */
206# define E1K_INC_ISTAT_CNT(cnt)
207#endif /* E1K_INT_STATS */
208
209
210/*****************************************************************************/
211
212typedef uint32_t E1KCHIP;
213#define E1K_CHIP_82540EM 0
214#define E1K_CHIP_82543GC 1
215#define E1K_CHIP_82545EM 2
216
217struct E1kChips
218{
219 uint16_t uPCIVendorId;
220 uint16_t uPCIDeviceId;
221 uint16_t uPCISubsystemVendorId;
222 uint16_t uPCISubsystemId;
223 const char *pcszName;
224} g_Chips[] =
225{
226 /* Vendor Device SSVendor SubSys Name */
227 { 0x8086,
228 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
229#ifdef E1K_WITH_MSI
230 0x105E,
231#else
232 0x100E,
233#endif
234 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
235 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
236 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
237};
238
239
240/* The size of register area mapped to I/O space */
241#define E1K_IOPORT_SIZE 0x8
242/* The size of memory-mapped register area */
243#define E1K_MM_SIZE 0x20000
244
245#define E1K_MAX_TX_PKT_SIZE 16288
246#define E1K_MAX_RX_PKT_SIZE 16384
247
248/*****************************************************************************/
249
250/** Gets the specified bits from the register. */
251#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
252#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
253#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
254#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
255#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
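
/*
 * Usage sketch for the accessors above, using the RCTL.BSIZE field defined
 * further below (RCTL_BSIZE_MASK = 0x00030000, RCTL_BSIZE_SHIFT = 16):
 *
 *     GET_BITS(RCTL, BSIZE)     // expands to (RCTL & 0x00030000) >> 16
 *     SET_BITS(RCTL, BSIZE, 1)  // replaces just bits 17:16 with 01, rest untouched
 */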
256
257#define CTRL_SLU 0x00000040
258#define CTRL_MDIO 0x00100000
259#define CTRL_MDC 0x00200000
260#define CTRL_MDIO_DIR 0x01000000
261#define CTRL_MDC_DIR 0x02000000
262#define CTRL_RESET 0x04000000
263#define CTRL_VME 0x40000000
264
265#define STATUS_LU 0x00000002
266#define STATUS_TXOFF 0x00000010
267
268#define EECD_EE_WIRES 0x0F
269#define EECD_EE_REQ 0x40
270#define EECD_EE_GNT 0x80
271
272#define EERD_START 0x00000001
273#define EERD_DONE 0x00000010
274#define EERD_DATA_MASK 0xFFFF0000
275#define EERD_DATA_SHIFT 16
276#define EERD_ADDR_MASK 0x0000FF00
277#define EERD_ADDR_SHIFT 8
278
279#define MDIC_DATA_MASK 0x0000FFFF
280#define MDIC_DATA_SHIFT 0
281#define MDIC_REG_MASK 0x001F0000
282#define MDIC_REG_SHIFT 16
283#define MDIC_PHY_MASK 0x03E00000
284#define MDIC_PHY_SHIFT 21
285#define MDIC_OP_WRITE 0x04000000
286#define MDIC_OP_READ 0x08000000
287#define MDIC_READY 0x10000000
288#define MDIC_INT_EN 0x20000000
289#define MDIC_ERROR 0x40000000
290
291#define TCTL_EN 0x00000002
292#define TCTL_PSP 0x00000008
293
294#define RCTL_EN 0x00000002
295#define RCTL_UPE 0x00000008
296#define RCTL_MPE 0x00000010
297#define RCTL_LPE 0x00000020
298#define RCTL_LBM_MASK 0x000000C0
299#define RCTL_LBM_SHIFT 6
300#define RCTL_RDMTS_MASK 0x00000300
301#define RCTL_RDMTS_SHIFT 8
302#define RCTL_LBM_TCVR 3 /**< PHY or external SerDes loopback. */
303#define RCTL_MO_MASK 0x00003000
304#define RCTL_MO_SHIFT 12
305#define RCTL_BAM 0x00008000
306#define RCTL_BSIZE_MASK 0x00030000
307#define RCTL_BSIZE_SHIFT 16
308#define RCTL_VFE 0x00040000
309#define RCTL_CFIEN 0x00080000
310#define RCTL_CFI 0x00100000
311#define RCTL_BSEX 0x02000000
312#define RCTL_SECRC 0x04000000
313
314#define ICR_TXDW 0x00000001
315#define ICR_TXQE 0x00000002
316#define ICR_LSC 0x00000004
317#define ICR_RXDMT0 0x00000010
318#define ICR_RXT0 0x00000080
319#define ICR_TXD_LOW 0x00008000
320#define RDTR_FPD 0x80000000
321
322#define PBA_st ((PBAST*)(pState->auRegs + PBA_IDX))
323typedef struct
324{
325 unsigned rxa : 7;
326 unsigned rxa_r : 9;
327 unsigned txa : 16;
328} PBAST;
329AssertCompileSize(PBAST, 4);
330
331#define TXDCTL_WTHRESH_MASK 0x003F0000
332#define TXDCTL_WTHRESH_SHIFT 16
333#define TXDCTL_LWTHRESH_MASK 0xFE000000
334#define TXDCTL_LWTHRESH_SHIFT 25
335
336#define RXCSUM_PCSS_MASK 0x000000FF
337#define RXCSUM_PCSS_SHIFT 0
338
339/* Register access macros ****************************************************/
340#define CTRL pState->auRegs[CTRL_IDX]
341#define STATUS pState->auRegs[STATUS_IDX]
342#define EECD pState->auRegs[EECD_IDX]
343#define EERD pState->auRegs[EERD_IDX]
344#define CTRL_EXT pState->auRegs[CTRL_EXT_IDX]
345#define FLA pState->auRegs[FLA_IDX]
346#define MDIC pState->auRegs[MDIC_IDX]
347#define FCAL pState->auRegs[FCAL_IDX]
348#define FCAH pState->auRegs[FCAH_IDX]
349#define FCT pState->auRegs[FCT_IDX]
350#define VET pState->auRegs[VET_IDX]
351#define ICR pState->auRegs[ICR_IDX]
352#define ITR pState->auRegs[ITR_IDX]
353#define ICS pState->auRegs[ICS_IDX]
354#define IMS pState->auRegs[IMS_IDX]
355#define IMC pState->auRegs[IMC_IDX]
356#define RCTL pState->auRegs[RCTL_IDX]
357#define FCTTV pState->auRegs[FCTTV_IDX]
358#define TXCW pState->auRegs[TXCW_IDX]
359#define RXCW pState->auRegs[RXCW_IDX]
360#define TCTL pState->auRegs[TCTL_IDX]
361#define TIPG pState->auRegs[TIPG_IDX]
362#define AIFS pState->auRegs[AIFS_IDX]
363#define LEDCTL pState->auRegs[LEDCTL_IDX]
364#define PBA pState->auRegs[PBA_IDX]
365#define FCRTL pState->auRegs[FCRTL_IDX]
366#define FCRTH pState->auRegs[FCRTH_IDX]
367#define RDFH pState->auRegs[RDFH_IDX]
368#define RDFT pState->auRegs[RDFT_IDX]
369#define RDFHS pState->auRegs[RDFHS_IDX]
370#define RDFTS pState->auRegs[RDFTS_IDX]
371#define RDFPC pState->auRegs[RDFPC_IDX]
372#define RDBAL pState->auRegs[RDBAL_IDX]
373#define RDBAH pState->auRegs[RDBAH_IDX]
374#define RDLEN pState->auRegs[RDLEN_IDX]
375#define RDH pState->auRegs[RDH_IDX]
376#define RDT pState->auRegs[RDT_IDX]
377#define RDTR pState->auRegs[RDTR_IDX]
378#define RXDCTL pState->auRegs[RXDCTL_IDX]
379#define RADV pState->auRegs[RADV_IDX]
380#define RSRPD pState->auRegs[RSRPD_IDX]
381#define TXDMAC pState->auRegs[TXDMAC_IDX]
382#define TDFH pState->auRegs[TDFH_IDX]
383#define TDFT pState->auRegs[TDFT_IDX]
384#define TDFHS pState->auRegs[TDFHS_IDX]
385#define TDFTS pState->auRegs[TDFTS_IDX]
386#define TDFPC pState->auRegs[TDFPC_IDX]
387#define TDBAL pState->auRegs[TDBAL_IDX]
388#define TDBAH pState->auRegs[TDBAH_IDX]
389#define TDLEN pState->auRegs[TDLEN_IDX]
390#define TDH pState->auRegs[TDH_IDX]
391#define TDT pState->auRegs[TDT_IDX]
392#define TIDV pState->auRegs[TIDV_IDX]
393#define TXDCTL pState->auRegs[TXDCTL_IDX]
394#define TADV pState->auRegs[TADV_IDX]
395#define TSPMT pState->auRegs[TSPMT_IDX]
396#define CRCERRS pState->auRegs[CRCERRS_IDX]
397#define ALGNERRC pState->auRegs[ALGNERRC_IDX]
398#define SYMERRS pState->auRegs[SYMERRS_IDX]
399#define RXERRC pState->auRegs[RXERRC_IDX]
400#define MPC pState->auRegs[MPC_IDX]
401#define SCC pState->auRegs[SCC_IDX]
402#define ECOL pState->auRegs[ECOL_IDX]
403#define MCC pState->auRegs[MCC_IDX]
404#define LATECOL pState->auRegs[LATECOL_IDX]
405#define COLC pState->auRegs[COLC_IDX]
406#define DC pState->auRegs[DC_IDX]
407#define TNCRS pState->auRegs[TNCRS_IDX]
408#define SEC pState->auRegs[SEC_IDX]
409#define CEXTERR pState->auRegs[CEXTERR_IDX]
410#define RLEC pState->auRegs[RLEC_IDX]
411#define XONRXC pState->auRegs[XONRXC_IDX]
412#define XONTXC pState->auRegs[XONTXC_IDX]
413#define XOFFRXC pState->auRegs[XOFFRXC_IDX]
414#define XOFFTXC pState->auRegs[XOFFTXC_IDX]
415#define FCRUC pState->auRegs[FCRUC_IDX]
416#define PRC64 pState->auRegs[PRC64_IDX]
417#define PRC127 pState->auRegs[PRC127_IDX]
418#define PRC255 pState->auRegs[PRC255_IDX]
419#define PRC511 pState->auRegs[PRC511_IDX]
420#define PRC1023 pState->auRegs[PRC1023_IDX]
421#define PRC1522 pState->auRegs[PRC1522_IDX]
422#define GPRC pState->auRegs[GPRC_IDX]
423#define BPRC pState->auRegs[BPRC_IDX]
424#define MPRC pState->auRegs[MPRC_IDX]
425#define GPTC pState->auRegs[GPTC_IDX]
426#define GORCL pState->auRegs[GORCL_IDX]
427#define GORCH pState->auRegs[GORCH_IDX]
428#define GOTCL pState->auRegs[GOTCL_IDX]
429#define GOTCH pState->auRegs[GOTCH_IDX]
430#define RNBC pState->auRegs[RNBC_IDX]
431#define RUC pState->auRegs[RUC_IDX]
432#define RFC pState->auRegs[RFC_IDX]
433#define ROC pState->auRegs[ROC_IDX]
434#define RJC pState->auRegs[RJC_IDX]
435#define MGTPRC pState->auRegs[MGTPRC_IDX]
436#define MGTPDC pState->auRegs[MGTPDC_IDX]
437#define MGTPTC pState->auRegs[MGTPTC_IDX]
438#define TORL pState->auRegs[TORL_IDX]
439#define TORH pState->auRegs[TORH_IDX]
440#define TOTL pState->auRegs[TOTL_IDX]
441#define TOTH pState->auRegs[TOTH_IDX]
442#define TPR pState->auRegs[TPR_IDX]
443#define TPT pState->auRegs[TPT_IDX]
444#define PTC64 pState->auRegs[PTC64_IDX]
445#define PTC127 pState->auRegs[PTC127_IDX]
446#define PTC255 pState->auRegs[PTC255_IDX]
447#define PTC511 pState->auRegs[PTC511_IDX]
448#define PTC1023 pState->auRegs[PTC1023_IDX]
449#define PTC1522 pState->auRegs[PTC1522_IDX]
450#define MPTC pState->auRegs[MPTC_IDX]
451#define BPTC pState->auRegs[BPTC_IDX]
452#define TSCTC pState->auRegs[TSCTC_IDX]
453#define TSCTFC pState->auRegs[TSCTFC_IDX]
454#define RXCSUM pState->auRegs[RXCSUM_IDX]
455#define WUC pState->auRegs[WUC_IDX]
456#define WUFC pState->auRegs[WUFC_IDX]
457#define WUS pState->auRegs[WUS_IDX]
458#define MANC pState->auRegs[MANC_IDX]
459#define IPAV pState->auRegs[IPAV_IDX]
460#define WUPL pState->auRegs[WUPL_IDX]
461
462/**
463 * Indices of memory-mapped registers in register table
464 */
465typedef enum
466{
467 CTRL_IDX,
468 STATUS_IDX,
469 EECD_IDX,
470 EERD_IDX,
471 CTRL_EXT_IDX,
472 FLA_IDX,
473 MDIC_IDX,
474 FCAL_IDX,
475 FCAH_IDX,
476 FCT_IDX,
477 VET_IDX,
478 ICR_IDX,
479 ITR_IDX,
480 ICS_IDX,
481 IMS_IDX,
482 IMC_IDX,
483 RCTL_IDX,
484 FCTTV_IDX,
485 TXCW_IDX,
486 RXCW_IDX,
487 TCTL_IDX,
488 TIPG_IDX,
489 AIFS_IDX,
490 LEDCTL_IDX,
491 PBA_IDX,
492 FCRTL_IDX,
493 FCRTH_IDX,
494 RDFH_IDX,
495 RDFT_IDX,
496 RDFHS_IDX,
497 RDFTS_IDX,
498 RDFPC_IDX,
499 RDBAL_IDX,
500 RDBAH_IDX,
501 RDLEN_IDX,
502 RDH_IDX,
503 RDT_IDX,
504 RDTR_IDX,
505 RXDCTL_IDX,
506 RADV_IDX,
507 RSRPD_IDX,
508 TXDMAC_IDX,
509 TDFH_IDX,
510 TDFT_IDX,
511 TDFHS_IDX,
512 TDFTS_IDX,
513 TDFPC_IDX,
514 TDBAL_IDX,
515 TDBAH_IDX,
516 TDLEN_IDX,
517 TDH_IDX,
518 TDT_IDX,
519 TIDV_IDX,
520 TXDCTL_IDX,
521 TADV_IDX,
522 TSPMT_IDX,
523 CRCERRS_IDX,
524 ALGNERRC_IDX,
525 SYMERRS_IDX,
526 RXERRC_IDX,
527 MPC_IDX,
528 SCC_IDX,
529 ECOL_IDX,
530 MCC_IDX,
531 LATECOL_IDX,
532 COLC_IDX,
533 DC_IDX,
534 TNCRS_IDX,
535 SEC_IDX,
536 CEXTERR_IDX,
537 RLEC_IDX,
538 XONRXC_IDX,
539 XONTXC_IDX,
540 XOFFRXC_IDX,
541 XOFFTXC_IDX,
542 FCRUC_IDX,
543 PRC64_IDX,
544 PRC127_IDX,
545 PRC255_IDX,
546 PRC511_IDX,
547 PRC1023_IDX,
548 PRC1522_IDX,
549 GPRC_IDX,
550 BPRC_IDX,
551 MPRC_IDX,
552 GPTC_IDX,
553 GORCL_IDX,
554 GORCH_IDX,
555 GOTCL_IDX,
556 GOTCH_IDX,
557 RNBC_IDX,
558 RUC_IDX,
559 RFC_IDX,
560 ROC_IDX,
561 RJC_IDX,
562 MGTPRC_IDX,
563 MGTPDC_IDX,
564 MGTPTC_IDX,
565 TORL_IDX,
566 TORH_IDX,
567 TOTL_IDX,
568 TOTH_IDX,
569 TPR_IDX,
570 TPT_IDX,
571 PTC64_IDX,
572 PTC127_IDX,
573 PTC255_IDX,
574 PTC511_IDX,
575 PTC1023_IDX,
576 PTC1522_IDX,
577 MPTC_IDX,
578 BPTC_IDX,
579 TSCTC_IDX,
580 TSCTFC_IDX,
581 RXCSUM_IDX,
582 WUC_IDX,
583 WUFC_IDX,
584 WUS_IDX,
585 MANC_IDX,
586 IPAV_IDX,
587 WUPL_IDX,
588 MTA_IDX,
589 RA_IDX,
590 VFTA_IDX,
591 IP4AT_IDX,
592 IP6AT_IDX,
593 WUPM_IDX,
594 FFLT_IDX,
595 FFMT_IDX,
596 FFVT_IDX,
597 PBM_IDX,
598 RA_82542_IDX,
599 MTA_82542_IDX,
600 VFTA_82542_IDX,
601 E1K_NUM_OF_REGS
602} E1kRegIndex;
603
604#define E1K_NUM_OF_32BIT_REGS MTA_IDX
605
606
607/**
608 * Define E1000-specific EEPROM layout.
609 */
610class E1kEEPROM
611{
612 public:
613 EEPROM93C46 eeprom;
614
615#ifdef IN_RING3
616 /**
617 * Initialize EEPROM content.
618 *
619 * @param macAddr MAC address of E1000.
620 */
621 void init(RTMAC &macAddr)
622 {
623 eeprom.init();
624 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
625 eeprom.m_au16Data[0x04] = 0xFFFF;
626 /*
627 * bit 3 - full support for power management
628 * bit 10 - full duplex
629 */
630 eeprom.m_au16Data[0x0A] = 0x4408;
631 eeprom.m_au16Data[0x0B] = 0x001E;
632 eeprom.m_au16Data[0x0C] = 0x8086;
633 eeprom.m_au16Data[0x0D] = 0x100E;
634 eeprom.m_au16Data[0x0E] = 0x8086;
635 eeprom.m_au16Data[0x0F] = 0x3040;
636 eeprom.m_au16Data[0x21] = 0x7061;
637 eeprom.m_au16Data[0x22] = 0x280C;
638 eeprom.m_au16Data[0x23] = 0x00C8;
639 eeprom.m_au16Data[0x24] = 0x00C8;
640 eeprom.m_au16Data[0x2F] = 0x0602;
641 updateChecksum();
642 };
643
644 /**
645 * Compute the checksum as required by E1000 and store it
646 * in the last word.
647 */
648 void updateChecksum()
649 {
650 uint16_t u16Checksum = 0;
651
652 for (int i = 0; i < eeprom.SIZE-1; i++)
653 u16Checksum += eeprom.m_au16Data[i];
654 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
655 };
656
657 /**
658 * First 6 bytes of EEPROM contain MAC address.
659 *
660 * @param pMac Where to store the MAC address of E1000.
661 */
662 void getMac(PRTMAC pMac)
663 {
664 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
665 };
666
667 uint32_t read()
668 {
669 return eeprom.read();
670 }
671
672 void write(uint32_t u32Wires)
673 {
674 eeprom.write(u32Wires);
675 }
676
677 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
678 {
679 return eeprom.readWord(u32Addr, pu16Value);
680 }
681
682 int load(PSSMHANDLE pSSM)
683 {
684 return eeprom.load(pSSM);
685 }
686
687 void save(PSSMHANDLE pSSM)
688 {
689 eeprom.save(pSSM);
690 }
691#endif /* IN_RING3 */
692};
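
/*
 * Verification sketch for the checksum convention implemented by
 * updateChecksum() above: summing all EEPROM words, including the checksum
 * word itself, must yield 0xBABA modulo 16 bits. The helper below is a
 * hypothetical illustration, not used by the device code.
 */
#ifdef IN_RING3
static bool e1kSketchIsEepromChecksumValid(E1kEEPROM &Eeprom)
{
    uint16_t u16Sum = 0;
    for (int i = 0; i < Eeprom.eeprom.SIZE; i++)
        u16Sum += Eeprom.eeprom.m_au16Data[i];
    return u16Sum == 0xBABA;
}
#endif /* IN_RING3 */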
693
694
695#define E1K_SPEC_VLAN(s) (s & 0xFFF)
696#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
697#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
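
/*
 * Worked example for the VLAN tag accessors above: a special field value of
 * 0x6123 carries priority 3 (bits 15:13), CFI 0 (bit 12) and VLAN id 0x123
 * (bits 11:0).
 */
AssertCompile(E1K_SPEC_VLAN(0x6123) == 0x123);
AssertCompile(E1K_SPEC_CFI(0x6123)  == 0);
AssertCompile(E1K_SPEC_PRI(0x6123)  == 3);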
698
699struct E1kRxDStatus
700{
701 /** @name Descriptor Status field (3.2.3.1)
702 * @{ */
703 unsigned fDD : 1; /**< Descriptor Done. */
704 unsigned fEOP : 1; /**< End of packet. */
705 unsigned fIXSM : 1; /**< Ignore checksum indication. */
706 unsigned fVP : 1; /**< VLAN, matches VET. */
707 unsigned : 1;
709 unsigned fTCPCS : 1; /**< TCP Checksum calculated on the packet. */
709 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
710 unsigned fPIF : 1; /**< Passed in-exact filter */
711 /** @} */
712 /** @name Descriptor Errors field (3.2.3.2)
713 * (Only valid when fEOP and fDD are set.)
714 * @{ */
715 unsigned fCE : 1; /**< CRC or alignment error. */
716 unsigned : 4; /**< Reserved, varies with different models... */
717 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
718 unsigned fIPE : 1; /**< IP Checksum error. */
719 unsigned fRXE : 1; /**< RX Data error. */
720 /** @} */
721 /** @name Descriptor Special field (3.2.3.3)
722 * @{ */
723 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
724 /** @} */
725};
726typedef struct E1kRxDStatus E1KRXDST;
727
728struct E1kRxDesc_st
729{
730 uint64_t u64BufAddr; /**< Address of data buffer */
731 uint16_t u16Length; /**< Length of data in buffer */
732 uint16_t u16Checksum; /**< Packet checksum */
733 E1KRXDST status;
734};
735typedef struct E1kRxDesc_st E1KRXDESC;
736AssertCompileSize(E1KRXDESC, 16);
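
/*
 * Write-back sketch for the descriptor above (hypothetical helper, for
 * illustration): when a whole packet fits into a single buffer the device
 * stores the byte count and sets DD+EOP; checksum and VLAN related bits
 * depend on the offloading configuration and are left untouched here.
 */
static inline void e1kSketchCompleteRxDesc(E1KRXDESC *pDesc, uint16_t cbWritten)
{
    pDesc->u16Length   = cbWritten; /* Bytes actually stored in the buffer. */
    pDesc->status.fDD  = 1;         /* Descriptor Done. */
    pDesc->status.fEOP = 1;         /* End Of Packet - no buffer chaining needed. */
}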
737
738#define E1K_DTYP_LEGACY -1
739#define E1K_DTYP_CONTEXT 0
740#define E1K_DTYP_DATA 1
741
742struct E1kTDLegacy
743{
744 uint64_t u64BufAddr; /**< Address of data buffer */
745 struct TDLCmd_st
746 {
747 unsigned u16Length : 16;
748 unsigned u8CSO : 8;
749 /* CMD field : 8 */
750 unsigned fEOP : 1;
751 unsigned fIFCS : 1;
752 unsigned fIC : 1;
753 unsigned fRS : 1;
754 unsigned fRPS : 1;
755 unsigned fDEXT : 1;
756 unsigned fVLE : 1;
757 unsigned fIDE : 1;
758 } cmd;
759 struct TDLDw3_st
760 {
761 /* STA field */
762 unsigned fDD : 1;
763 unsigned fEC : 1;
764 unsigned fLC : 1;
765 unsigned fTURSV : 1;
766 /* RSV field */
767 unsigned u4RSV : 4;
768 /* CSS field */
769 unsigned u8CSS : 8;
770 /* Special field*/
771 unsigned u16Special: 16;
772 } dw3;
773};
774
775/**
776 * TCP/IP Context Transmit Descriptor, section 3.3.6.
777 */
778struct E1kTDContext
779{
780 struct CheckSum_st
781 {
782 /** TSE: Header start. !TSE: Checksum start. */
783 unsigned u8CSS : 8;
784 /** Checksum offset - where to store it. */
785 unsigned u8CSO : 8;
786 /** Checksum ending (inclusive) offset, 0 = end of packet. */
787 unsigned u16CSE : 16;
788 } ip;
789 struct CheckSum_st tu;
790 struct TDCDw2_st
791 {
792 /** TSE: The total number of payload bytes for this context. Sans header. */
793 unsigned u20PAYLEN : 20;
794 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
795 unsigned u4DTYP : 4;
796 /** TUCMD field, 8 bits
797 * @{ */
798 /** TSE: TCP (set) or UDP (clear). */
799 unsigned fTCP : 1;
800 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
801 * the IP header. Does not affect the checksumming.
802 * @remarks 82544GC/EI interprets a cleared field differently. */
803 unsigned fIP : 1;
804 /** TSE: TCP segmentation enable. When clear the context describes checksum offloading only. */
805 unsigned fTSE : 1;
806 /** Report status (only applies to dw3.fDD for here). */
807 unsigned fRS : 1;
808 /** Reserved, MBZ. */
809 unsigned fRSV1 : 1;
810 /** Descriptor extension, must be set for this descriptor type. */
811 unsigned fDEXT : 1;
812 /** Reserved, MBZ. */
813 unsigned fRSV2 : 1;
814 /** Interrupt delay enable. */
815 unsigned fIDE : 1;
816 /** @} */
817 } dw2;
818 struct TDCDw3_st
819 {
820 /** Descriptor Done. */
821 unsigned fDD : 1;
822 /** Reserved, MBZ. */
823 unsigned u7RSV : 7;
824 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
825 unsigned u8HDRLEN : 8;
826 /** TSO: Maximum segment size. */
827 unsigned u16MSS : 16;
828 } dw3;
829};
830typedef struct E1kTDContext E1KTXCTX;
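
/*
 * Segmentation arithmetic implied by the context above (illustrative sketch,
 * hypothetical helper): with TSE enabled, PAYLEN bytes of payload are carved
 * into MSS-sized chunks, each sent behind a copy of the HDRLEN-byte prototype
 * header, so the number of frames on the wire is ceil(PAYLEN / MSS).
 */
static inline uint32_t e1kSketchTseFrameCount(const E1KTXCTX *pCtx)
{
    uint32_t cbPayload = pCtx->dw2.u20PAYLEN;
    uint32_t cbMss     = pCtx->dw3.u16MSS;
    return cbMss ? (cbPayload + cbMss - 1) / cbMss : 0;
}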
831
832/**
833 * TCP/IP Data Transmit Descriptor, section 3.3.7.
834 */
835struct E1kTDData
836{
837 uint64_t u64BufAddr; /**< Address of data buffer */
838 struct TDDCmd_st
839 {
840 /** The total length of data pointed to by this descriptor. */
841 unsigned u20DTALEN : 20;
842 /** The descriptor type - E1K_DTYP_DATA (1). */
843 unsigned u4DTYP : 4;
844 /** @name DCMD field, 8 bits (3.3.7.1).
845 * @{ */
846 /** End of packet. Note TSCTFC update. */
847 unsigned fEOP : 1;
848 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
849 unsigned fIFCS : 1;
850 /** Use the TSE context when set and the normal when clear. */
851 unsigned fTSE : 1;
852 /** Report status (dw3.STA). */
853 unsigned fRS : 1;
854 /** Reserved. 82544GC/EI defines this report packet set (RPS). */
855 unsigned fRPS : 1;
856 /** Descriptor extension, must be set for this descriptor type. */
857 unsigned fDEXT : 1;
858 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
859 * Insert dw3.SPECIAL after ethernet header. */
860 unsigned fVLE : 1;
861 /** Interrupt delay enable. */
862 unsigned fIDE : 1;
863 /** @} */
864 } cmd;
865 struct TDDDw3_st
866 {
867 /** @name STA field (3.3.7.2)
868 * @{ */
869 unsigned fDD : 1; /**< Descriptor done. */
870 unsigned fEC : 1; /**< Excess collision. */
871 unsigned fLC : 1; /**< Late collision. */
872 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
873 unsigned fTURSV : 1;
874 /** @} */
875 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
876 /** @name POPTS (Packet Option) field (3.3.7.3)
877 * @{ */
878 unsigned fIXSM : 1; /**< Insert IP checksum. */
879 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
880 unsigned u6RSV : 6; /**< Reserved, MBZ. */
881 /** @} */
882 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
883 * Requires fEOP, fVLE and CTRL.VME to be set.
884 * @{ */
885 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
886 /** @} */
887 } dw3;
888};
889typedef struct E1kTDData E1KTXDAT;
890
891union E1kTxDesc
892{
893 struct E1kTDLegacy legacy;
894 struct E1kTDContext context;
895 struct E1kTDData data;
896};
897typedef union E1kTxDesc E1KTXDESC;
898AssertCompileSize(E1KTXDESC, 16);
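
/*
 * Discrimination sketch for the union above (hypothetical helper, for
 * illustration): legacy descriptors have DEXT clear, extended descriptors
 * have DEXT set and carry their type in DTYP, matching the E1K_DTYP_* values
 * defined earlier. DEXT occupies the same bit in every layout, and DTYP the
 * same bits in both extended layouts, so the data member can be used to read
 * them.
 */
static inline int e1kSketchGetDescType(const E1KTXDESC *pDesc)
{
    if (!pDesc->data.cmd.fDEXT)
        return E1K_DTYP_LEGACY;    /* -1 */
    return pDesc->data.cmd.u4DTYP; /* E1K_DTYP_CONTEXT (0) or E1K_DTYP_DATA (1) */
}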
899
900#define RA_CTL_AS 0x0003
901#define RA_CTL_AV 0x8000
902
903union E1kRecAddr
904{
905 uint32_t au32[32];
906 struct RAArray
907 {
908 uint8_t addr[6];
909 uint16_t ctl;
910 } array[16];
911};
912typedef struct E1kRecAddr::RAArray E1KRAELEM;
913typedef union E1kRecAddr E1KRA;
914AssertCompileSize(E1KRA, 8*16);
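
/*
 * Filtering sketch for the receive address table above (hypothetical helper,
 * illustration only): a unicast frame passes the perfect filter when its
 * destination MAC equals an entry whose Address Valid bit is set; the AS
 * bits and promiscuous/multicast handling are ignored here.
 */
static inline bool e1kSketchPerfectMatch(const E1KRA *pRa, const uint8_t *pbDstMac)
{
    for (unsigned i = 0; i < RT_ELEMENTS(pRa->array); i++)
        if (   (pRa->array[i].ctl & RA_CTL_AV)
            && !memcmp(pRa->array[i].addr, pbDstMac, sizeof(pRa->array[i].addr)))
            return true;
    return false;
}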
915
916#define E1K_IP_RF 0x8000 /* reserved fragment flag */
917#define E1K_IP_DF 0x4000 /* don't fragment flag */
918#define E1K_IP_MF 0x2000 /* more fragments flag */
919#define E1K_IP_OFFMASK 0x1fff /* mask for fragmenting bits */
920
921/** @todo use+extend RTNETIPV4 */
922struct E1kIpHeader
923{
924 /* type of service / version / header length */
925 uint16_t tos_ver_hl;
926 /* total length */
927 uint16_t total_len;
928 /* identification */
929 uint16_t ident;
930 /* fragment offset field */
931 uint16_t offset;
932 /* time to live / protocol*/
933 uint16_t ttl_proto;
934 /* checksum */
935 uint16_t chksum;
936 /* source IP address */
937 uint32_t src;
938 /* destination IP address */
939 uint32_t dest;
940};
941AssertCompileSize(struct E1kIpHeader, 20);
942
943#define E1K_TCP_FIN 0x01U
944#define E1K_TCP_SYN 0x02U
945#define E1K_TCP_RST 0x04U
946#define E1K_TCP_PSH 0x08U
947#define E1K_TCP_ACK 0x10U
948#define E1K_TCP_URG 0x20U
949#define E1K_TCP_ECE 0x40U
950#define E1K_TCP_CWR 0x80U
951
952#define E1K_TCP_FLAGS 0x3fU
953
954/** @todo use+extend RTNETTCP */
955struct E1kTcpHeader
956{
957 uint16_t src;
958 uint16_t dest;
959 uint32_t seqno;
960 uint32_t ackno;
961 uint16_t hdrlen_flags;
962 uint16_t wnd;
963 uint16_t chksum;
964 uint16_t urgp;
965};
966AssertCompileSize(struct E1kTcpHeader, 20);
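
/*
 * Checksum sketch: the IP/TCP header layouts above exist to support checksum
 * offloading, which is based on the standard 16-bit one's complement sum.
 * A straightforward version of that sum is shown below for illustration
 * (hypothetical helper; the offloading code keeps partial sums instead).
 */
static uint16_t e1kSketchInetCSum(const uint8_t *pb, size_t cb)
{
    uint32_t u32Sum = 0;
    while (cb > 1)
    {
        u32Sum += ((uint32_t)pb[0] << 8) | pb[1]; /* Network-order 16-bit words. */
        pb     += 2;
        cb     -= 2;
    }
    if (cb)                                       /* Odd trailing byte, zero-padded. */
        u32Sum += (uint32_t)pb[0] << 8;
    while (u32Sum >> 16)                          /* Fold carries back into 16 bits. */
        u32Sum = (u32Sum & 0xFFFF) + (u32Sum >> 16);
    return (uint16_t)~u32Sum;
}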
967
968
969#ifdef E1K_WITH_TXD_CACHE
970/** The current Saved state version. */
971#define E1K_SAVEDSTATE_VERSION 4
972/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
973#define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
974#else /* !E1K_WITH_TXD_CACHE */
975/** The current Saved state version. */
976#define E1K_SAVEDSTATE_VERSION 3
977#endif /* !E1K_WITH_TXD_CACHE */
978/** Saved state version for VirtualBox 4.1 and earlier.
979 * These did not include VLAN tag fields. */
980#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
981/** Saved state version for VirtualBox 3.0 and earlier.
982 * This did not include the configuration part nor the E1kEEPROM. */
983#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
984
985/**
986 * Device state structure. Holds the current state of the device.
987 *
988 * @implements PDMINETWORKDOWN
989 * @implements PDMINETWORKCONFIG
990 * @implements PDMILEDPORTS
991 */
992struct E1kState_st
993{
994 char szInstance[8]; /**< Instance name, e.g. E1000#1. */
995 PDMIBASE IBase;
996 PDMINETWORKDOWN INetworkDown;
997 PDMINETWORKCONFIG INetworkConfig;
998 PDMILEDPORTS ILeds; /**< LED interface */
999 R3PTRTYPE(PPDMIBASE) pDrvBase; /**< Attached network driver. */
1000 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
1001
1002 PPDMDEVINSR3 pDevInsR3; /**< Device instance - R3. */
1003 R3PTRTYPE(PPDMQUEUE) pTxQueueR3; /**< Transmit queue - R3. */
1004 R3PTRTYPE(PPDMQUEUE) pCanRxQueueR3; /**< Rx wakeup signaller - R3. */
1005 PPDMINETWORKUPR3 pDrvR3; /**< Attached network driver - R3. */
1006 PTMTIMERR3 pRIDTimerR3; /**< Receive Interrupt Delay Timer - R3. */
1007 PTMTIMERR3 pRADTimerR3; /**< Receive Absolute Delay Timer - R3. */
1008 PTMTIMERR3 pTIDTimerR3; /**< Transmit Interrupt Delay Timer - R3. */
1009 PTMTIMERR3 pTADTimerR3; /**< Transmit Absolute Delay Timer - R3. */
1010 PTMTIMERR3 pTXDTimerR3; /**< Transmit Delay Timer - R3. */
1011 PTMTIMERR3 pIntTimerR3; /**< Late Interrupt Timer - R3. */
1012 PTMTIMERR3 pLUTimerR3; /**< Link Up(/Restore) Timer. */
1013 /** The scatter / gather buffer used for the current outgoing packet - R3. */
1014 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1015
1016 PPDMDEVINSR0 pDevInsR0; /**< Device instance - R0. */
1017 R0PTRTYPE(PPDMQUEUE) pTxQueueR0; /**< Transmit queue - R0. */
1018 R0PTRTYPE(PPDMQUEUE) pCanRxQueueR0; /**< Rx wakeup signaller - R0. */
1019 PPDMINETWORKUPR0 pDrvR0; /**< Attached network driver - R0. */
1020 PTMTIMERR0 pRIDTimerR0; /**< Receive Interrupt Delay Timer - R0. */
1021 PTMTIMERR0 pRADTimerR0; /**< Receive Absolute Delay Timer - R0. */
1022 PTMTIMERR0 pTIDTimerR0; /**< Transmit Interrupt Delay Timer - R0. */
1023 PTMTIMERR0 pTADTimerR0; /**< Transmit Absolute Delay Timer - R0. */
1024 PTMTIMERR0 pTXDTimerR0; /**< Transmit Delay Timer - R0. */
1025 PTMTIMERR0 pIntTimerR0; /**< Late Interrupt Timer - R0. */
1026 PTMTIMERR0 pLUTimerR0; /**< Link Up(/Restore) Timer - R0. */
1027 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1028 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1029
1030 PPDMDEVINSRC pDevInsRC; /**< Device instance - RC. */
1031 RCPTRTYPE(PPDMQUEUE) pTxQueueRC; /**< Transmit queue - RC. */
1032 RCPTRTYPE(PPDMQUEUE) pCanRxQueueRC; /**< Rx wakeup signaller - RC. */
1033 PPDMINETWORKUPRC pDrvRC; /**< Attached network driver - RC. */
1034 PTMTIMERRC pRIDTimerRC; /**< Receive Interrupt Delay Timer - RC. */
1035 PTMTIMERRC pRADTimerRC; /**< Receive Absolute Delay Timer - RC. */
1036 PTMTIMERRC pTIDTimerRC; /**< Transmit Interrupt Delay Timer - RC. */
1037 PTMTIMERRC pTADTimerRC; /**< Transmit Absolute Delay Timer - RC. */
1038 PTMTIMERRC pTXDTimerRC; /**< Transmit Delay Timer - RC. */
1039 PTMTIMERRC pIntTimerRC; /**< Late Interrupt Timer - RC. */
1040 PTMTIMERRC pLUTimerRC; /**< Link Up(/Restore) Timer - RC. */
1041 /** The scatter / gather buffer used for the current outgoing packet - RC. */
1042 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1043 RTRCPTR RCPtrAlignment;
1044
1045#if HC_ARCH_BITS != 32
1046 uint32_t Alignment1;
1047#endif
1048 PDMCRITSECT cs; /**< Critical section - what is it protecting? */
1049 PDMCRITSECT csRx; /**< RX Critical section. */
1050#ifdef E1K_WITH_TX_CS
1051 PDMCRITSECT csTx; /**< TX Critical section. */
1052#endif /* E1K_WITH_TX_CS */
1053 /** Base address of memory-mapped registers. */
1054 RTGCPHYS addrMMReg;
1055 /** MAC address obtained from the configuration. */
1056 RTMAC macConfigured;
1057 /** Base port of I/O space region. */
1058 RTIOPORT addrIOPort;
1059 /** EMT: */
1060 PCIDEVICE pciDevice;
1061 /** EMT: Last time the interrupt was acknowledged. */
1062 uint64_t u64AckedAt;
1063 /** All: Used for eliminating spurious interrupts. */
1064 bool fIntRaised;
1065 /** EMT: false if the cable is disconnected by the GUI. */
1066 bool fCableConnected;
1067 /** EMT: */
1068 bool fR0Enabled;
1069 /** EMT: */
1070 bool fGCEnabled;
1071 /** EMT: Compute Ethernet CRC for RX packets. */
1072 bool fEthernetCRC;
1073
1074 bool Alignment2[3];
1075 /** Link up delay (in milliseconds). */
1076 uint32_t cMsLinkUpDelay;
1077
1078 /** All: Device register storage. */
1079 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1080 /** TX/RX: Status LED. */
1081 PDMLED led;
1082 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1083 uint32_t u32PktNo;
1084
1085 /** EMT: Offset of the register to be read via IO. */
1086 uint32_t uSelectedReg;
1087 /** EMT: Multicast Table Array. */
1088 uint32_t auMTA[128];
1089 /** EMT: Receive Address registers. */
1090 E1KRA aRecAddr;
1091 /** EMT: VLAN filter table array. */
1092 uint32_t auVFTA[128];
1093 /** EMT: Receive buffer size. */
1094 uint16_t u16RxBSize;
1095 /** EMT: Locked state -- no state alteration possible. */
1096 bool fLocked;
1097 /** EMT: */
1098 bool fDelayInts;
1099 /** All: */
1100 bool fIntMaskUsed;
1101
1102 /** N/A: */
1103 bool volatile fMaybeOutOfSpace;
1104 /** EMT: Gets signalled when more RX descriptors become available. */
1105 RTSEMEVENT hEventMoreRxDescAvail;
1106#ifdef E1K_WITH_RXD_CACHE
1107 /** RX: Fetched RX descriptors. */
1108 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1109 //uint64_t aRxDescAddr[E1K_RXD_CACHE_SIZE];
1110 /** RX: Actual number of fetched RX descriptors. */
1111 uint32_t nRxDFetched;
1112 /** RX: Index in cache of RX descriptor being processed. */
1113 uint32_t iRxDCurrent;
1114#endif /* E1K_WITH_RXD_CACHE */
1115
1116 /** TX: Context used for TCP segmentation packets. */
1117 E1KTXCTX contextTSE;
1118 /** TX: Context used for ordinary packets. */
1119 E1KTXCTX contextNormal;
1120#ifdef E1K_WITH_TXD_CACHE
1121 /** TX: Fetched TX descriptors. */
1122 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1123 /** TX: Actual number of fetched TX descriptors. */
1124 uint8_t nTxDFetched;
1125 /** TX: Index in cache of TX descriptor being processed. */
1126 uint8_t iTxDCurrent;
1127 /** TX: Will this frame be sent as GSO. */
1128 bool fGSO;
1129 /** TX: When false, forces segmentation in e1000 instead of sending frames as GSO. */
1130 bool fGSOEnabled;
1131 /** TX: Number of bytes in next packet. */
1132 uint32_t cbTxAlloc;
1133
1134#endif /* E1K_WITH_TXD_CACHE */
1135 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1136 * applicable to the current TSE mode. */
1137 PDMNETWORKGSO GsoCtx;
1138 /** Scratch space for holding the loopback / fallback scatter / gather
1139 * descriptor. */
1140 union
1141 {
1142 PDMSCATTERGATHER Sg;
1143 uint8_t padding[8 * sizeof(RTUINTPTR)];
1144 } uTxFallback;
1145 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1146 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1147 /** TX: Number of bytes assembled in TX packet buffer. */
1148 uint16_t u16TxPktLen;
1149 /** TX: IP checksum has to be inserted if true. */
1150 bool fIPcsum;
1151 /** TX: TCP/UDP checksum has to be inserted if true. */
1152 bool fTCPcsum;
1153 /** TX: VLAN tag has to be inserted if true. */
1154 bool fVTag;
1155 /** TX: TCI part of VLAN tag to be inserted. */
1156 uint16_t u16VTagTCI;
1157 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1158 uint32_t u32PayRemain;
1159 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1160 uint16_t u16HdrRemain;
1161 /** TX TSE fallback: Flags from template header. */
1162 uint16_t u16SavedFlags;
1163 /** TX TSE fallback: Partial checksum from template header. */
1164 uint32_t u32SavedCsum;
1165 /** ?: Emulated controller type. */
1166 E1KCHIP eChip;
1167
1168 /** EMT: EEPROM emulation */
1169 E1kEEPROM eeprom;
1170 /** EMT: Physical interface emulation. */
1171 PHY phy;
1172
1173#if 0
1174 /** Alignment padding. */
1175 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1176#endif
1177
1178 STAMCOUNTER StatReceiveBytes;
1179 STAMCOUNTER StatTransmitBytes;
1180#if defined(VBOX_WITH_STATISTICS)
1181 STAMPROFILEADV StatMMIOReadRZ;
1182 STAMPROFILEADV StatMMIOReadR3;
1183 STAMPROFILEADV StatMMIOWriteRZ;
1184 STAMPROFILEADV StatMMIOWriteR3;
1185 STAMPROFILEADV StatEEPROMRead;
1186 STAMPROFILEADV StatEEPROMWrite;
1187 STAMPROFILEADV StatIOReadRZ;
1188 STAMPROFILEADV StatIOReadR3;
1189 STAMPROFILEADV StatIOWriteRZ;
1190 STAMPROFILEADV StatIOWriteR3;
1191 STAMPROFILEADV StatLateIntTimer;
1192 STAMCOUNTER StatLateInts;
1193 STAMCOUNTER StatIntsRaised;
1194 STAMCOUNTER StatIntsPrevented;
1195 STAMPROFILEADV StatReceive;
1196 STAMPROFILEADV StatReceiveCRC;
1197 STAMPROFILEADV StatReceiveFilter;
1198 STAMPROFILEADV StatReceiveStore;
1199 STAMPROFILEADV StatTransmitRZ;
1200 STAMPROFILEADV StatTransmitR3;
1201 STAMPROFILE StatTransmitSendRZ;
1202 STAMPROFILE StatTransmitSendR3;
1203 STAMPROFILE StatRxOverflow;
1204 STAMCOUNTER StatRxOverflowWakeup;
1205 STAMCOUNTER StatTxDescCtxNormal;
1206 STAMCOUNTER StatTxDescCtxTSE;
1207 STAMCOUNTER StatTxDescLegacy;
1208 STAMCOUNTER StatTxDescData;
1209 STAMCOUNTER StatTxDescTSEData;
1210 STAMCOUNTER StatTxPathFallback;
1211 STAMCOUNTER StatTxPathGSO;
1212 STAMCOUNTER StatTxPathRegular;
1213 STAMCOUNTER StatPHYAccesses;
1214
1215#endif /* VBOX_WITH_STATISTICS */
1216
1217#ifdef E1K_INT_STATS
1218 /* Internal stats */
1219 uint64_t u64ArmedAt;
1220 uint64_t uStatMaxTxDelay;
1221 uint32_t uStatInt;
1222 uint32_t uStatIntTry;
1223 int32_t uStatIntLower;
1224 uint32_t uStatIntDly;
1225 int32_t iStatIntLost;
1226 int32_t iStatIntLostOne;
1227 uint32_t uStatDisDly;
1228 uint32_t uStatIntSkip;
1229 uint32_t uStatIntLate;
1230 uint32_t uStatIntMasked;
1231 uint32_t uStatIntEarly;
1232 uint32_t uStatIntRx;
1233 uint32_t uStatIntTx;
1234 uint32_t uStatIntICS;
1235 uint32_t uStatIntRDTR;
1236 uint32_t uStatIntRXDMT0;
1237 uint32_t uStatIntTXQE;
1238 uint32_t uStatTxNoRS;
1239 uint32_t uStatTxIDE;
1240 uint32_t uStatTxDelayed;
1241 uint32_t uStatTxDelayExp;
1242 uint32_t uStatTAD;
1243 uint32_t uStatTID;
1244 uint32_t uStatRAD;
1245 uint32_t uStatRID;
1246 uint32_t uStatRxFrm;
1247 uint32_t uStatTxFrm;
1248 uint32_t uStatDescCtx;
1249 uint32_t uStatDescDat;
1250 uint32_t uStatDescLeg;
1251 uint32_t uStatTx1514;
1252 uint32_t uStatTx2962;
1253 uint32_t uStatTx4410;
1254 uint32_t uStatTx5858;
1255 uint32_t uStatTx7306;
1256 uint32_t uStatTx8754;
1257 uint32_t uStatTx16384;
1258 uint32_t uStatTx32768;
1259 uint32_t uStatTxLarge;
1260 uint32_t uStatAlign;
1261#endif /* E1K_INT_STATS */
1262};
1263typedef struct E1kState_st E1KSTATE;
1264
1265#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1266
1267/* Forward declarations ******************************************************/
1268static int e1kXmitPending(E1KSTATE *pState, bool fOnWorkerThread);
1269
1270static int e1kRegReadUnimplemented (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1271static int e1kRegWriteUnimplemented(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1272static int e1kRegReadAutoClear (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1273static int e1kRegReadDefault (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1274static int e1kRegWriteDefault (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1275#if 0 /* unused */
1276static int e1kRegReadCTRL (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1277#endif
1278static int e1kRegWriteCTRL (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1279static int e1kRegReadEECD (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1280static int e1kRegWriteEECD (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1281static int e1kRegWriteEERD (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1282static int e1kRegWriteMDIC (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1283static int e1kRegReadICR (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1284static int e1kRegWriteICR (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1285static int e1kRegWriteICS (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1286static int e1kRegWriteIMS (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1287static int e1kRegWriteIMC (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1288static int e1kRegWriteRCTL (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1289static int e1kRegWritePBA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1290static int e1kRegWriteRDT (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1291static int e1kRegWriteRDTR (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1292static int e1kRegWriteTDT (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1293static int e1kRegReadMTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1294static int e1kRegWriteMTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1295static int e1kRegReadRA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1296static int e1kRegWriteRA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1297static int e1kRegReadVFTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1298static int e1kRegWriteVFTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1299
1300/**
1301 * Register map table.
1302 *
1303 * Override pfnRead and pfnWrite to get register-specific behavior.
1304 */
1305const static struct E1kRegMap_st
1306{
1307 /** Register offset in the register space. */
1308 uint32_t offset;
1309 /** Size in bytes. Registers of size > 4 are in fact tables. */
1310 uint32_t size;
1311 /** Readable bits. */
1312 uint32_t readable;
1313 /** Writable bits. */
1314 uint32_t writable;
1315 /** Read callback. */
1316 int (*pfnRead)(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1317 /** Write callback. */
1318 int (*pfnWrite)(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1319 /** Abbreviated name. */
1320 const char *abbrev;
1321 /** Full name. */
1322 const char *name;
1323} s_e1kRegMap[E1K_NUM_OF_REGS] =
1324{
1325 /* offset size read mask write mask read callback write callback abbrev full name */
1326 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1327 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1328 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1329 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1330 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1331 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1332 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1333 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1334 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1335 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1336 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1337 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1338 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1339 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1340 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1341 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1342 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1343 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1344 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1345 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1346 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1347 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1348 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1349 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1350 { 0x00e00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LEDCTL" , "LED Control" },
1351 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1352 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1353 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1354 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1355 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1356 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1357 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1358 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1359 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1360 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1361 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1362 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1363 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1364 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1365 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1366 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1367 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1368 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1369 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1370 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1371 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1372 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1373 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1374 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1375 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1376 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1377 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1378 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1379 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1380 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1381 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1382 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1383 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1384 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1385 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1386 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1387 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1388 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1389 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1390 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1391 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1392 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1393 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1394 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1395 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1396 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1397 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1398 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1399 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1400 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1401 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1402 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1403 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1404 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1405 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1406 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1407 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1408 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1409 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1410 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1411 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1412 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1413 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1414 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1415 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1416 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1417 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1418 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1419 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1420 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1421 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1422 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1423 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1424 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1425 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1426 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1427 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1428 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1429 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1430 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1431 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1432 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1433 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1434 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1435 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1436 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1437 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1438 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1439 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1440 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1441 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1442 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1443 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1444 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1445 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1446 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1447 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1448 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1449 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1450 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1451 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1452 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1453 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1454 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1455 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1456 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1457 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1458 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n) (82542)" },
1459 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n) (82542)" },
1460 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n) (82542)" }
1461};
1462
1463#ifdef DEBUG
1464
1465/**
1466 * Convert a U32 value to a hex string. Nibbles not selected by the mask are replaced with dots.
1467 *
1468 * @remarks The mask is applied with nibble (not bit) granularity (e.g. 000000FF selects the low byte).
1469 *
1470 * @returns The buffer.
1471 *
1472 * @param u32 The word to convert into string.
1473 * @param mask Selects which bytes to convert.
1474 * @param buf Where to put the result.
1475 */
1476static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1477{
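    /* Note: '7' == 'A' - 10, so adding it to nibble values 10..15 yields 'A'..'F',
     * while adding '0' to values 0..9 yields '0'..'9'. */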
1478 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1479 {
1480 if (mask & 0xF)
1481 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1482 else
1483 *ptr = '.';
1484 }
1485 buf[8] = 0;
1486 return buf;
1487}
1488
1489/**
1490 * Returns timer name for debug purposes.
1491 *
1492 * @returns The timer name.
1493 *
1494 * @param pState The device state structure.
1495 * @param pTimer The timer to get the name for.
1496 */
1497DECLINLINE(const char *) e1kGetTimerName(E1KSTATE *pState, PTMTIMER pTimer)
1498{
1499 if (pTimer == pState->CTX_SUFF(pTIDTimer))
1500 return "TID";
1501 if (pTimer == pState->CTX_SUFF(pTADTimer))
1502 return "TAD";
1503 if (pTimer == pState->CTX_SUFF(pRIDTimer))
1504 return "RID";
1505 if (pTimer == pState->CTX_SUFF(pRADTimer))
1506 return "RAD";
1507 if (pTimer == pState->CTX_SUFF(pIntTimer))
1508 return "Int";
1509 if (pTimer == pState->CTX_SUFF(pTXDTimer))
1510 return "TXD";
1511 return "unknown";
1512}
1513
1514#endif /* DEBUG */
1515
1516/**
1517 * Arm a timer.
1518 *
1519 * @param pState Pointer to the device state structure.
1520 * @param pTimer Pointer to the timer.
1521 * @param uExpireIn Expiration interval in microseconds.
1522 */
1523DECLINLINE(void) e1kArmTimer(E1KSTATE *pState, PTMTIMER pTimer, uint32_t uExpireIn)
1524{
1525 if (pState->fLocked)
1526 return;
1527
1528 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1529 INSTANCE(pState), e1kGetTimerName(pState, pTimer), uExpireIn));
1530 TMTimerSetMicro(pTimer, uExpireIn);
1531}
1532
1533/**
1534 * Cancel a timer.
1535 *
1536 * @param pState Pointer to the device state structure.
1537 * @param pTimer Pointer to the timer.
1538 */
1539DECLINLINE(void) e1kCancelTimer(E1KSTATE *pState, PTMTIMER pTimer)
1540{
1541 E1kLog2(("%s Stopping %s timer...\n",
1542 INSTANCE(pState), e1kGetTimerName(pState, pTimer)));
1543 int rc = TMTimerStop(pTimer);
1544 if (RT_FAILURE(rc))
1545 {
1546 E1kLog2(("%s e1kCancelTimer: TMTimerStop() failed with %Rrc\n",
1547 INSTANCE(pState), rc));
1548 }
1549}
1550
1551#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
1552#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)
1553
1554#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
1555#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
1556#define e1kCsRxIsOwner(ps) PDMCritSectIsOwner(&ps->csRx)
1557
1558#ifndef E1K_WITH_TX_CS
1559# define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1560# define e1kCsTxLeave(ps) do { } while (0)
1561# define e1kCsIsOwner(cs) true
1562#else /* E1K_WITH_TX_CS */
1563# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
1564# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
1565# define e1kCsIsOwner(cs) PDMCritSectIsOwner(cs)
1566#endif /* E1K_WITH_TX_CS */
1567
1568#ifdef IN_RING3
1569
1570/**
1571 * Wake up the RX thread.
1572 */
1573static void e1kWakeupReceive(PPDMDEVINS pDevIns)
1574{
1575 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
1576 if ( pState->fMaybeOutOfSpace
1577 && pState->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
1578 {
1579 STAM_COUNTER_INC(&pState->StatRxOverflowWakeup);
1580 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", INSTANCE(pState)));
1581 RTSemEventSignal(pState->hEventMoreRxDescAvail);
1582 }
1583}
1584
1585/**
1586 * Hardware reset. Revert all registers to initial values.
1587 *
1588 * @param pState The device state structure.
1589 */
1590static void e1kHardReset(E1KSTATE *pState)
1591{
1592 E1kLog(("%s Hard reset triggered\n", INSTANCE(pState)));
1593 memset(pState->auRegs, 0, sizeof(pState->auRegs));
1594 memset(pState->aRecAddr.au32, 0, sizeof(pState->aRecAddr.au32));
1595#ifdef E1K_INIT_RA0
1596 memcpy(pState->aRecAddr.au32, pState->macConfigured.au8,
1597 sizeof(pState->macConfigured.au8));
1598 pState->aRecAddr.array[0].ctl |= RA_CTL_AV;
1599#endif /* E1K_INIT_RA0 */
1600 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1601 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1602 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1603 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1604 Assert(GET_BITS(RCTL, BSIZE) == 0);
1605 pState->u16RxBSize = 2048;
1606
1607 /* Reset promiscuous mode */
1608 if (pState->pDrvR3)
1609 pState->pDrvR3->pfnSetPromiscuousMode(pState->pDrvR3, false);
1610
1611#ifdef E1K_WITH_TXD_CACHE
1612 int rc = e1kCsTxEnter(pState, VERR_SEM_BUSY);
1613 if (RT_LIKELY(rc == VINF_SUCCESS))
1614 {
1615 pState->nTxDFetched = 0;
1616 pState->iTxDCurrent = 0;
1617 pState->fGSO = false;
1618 pState->cbTxAlloc = 0;
1619 e1kCsTxLeave(pState);
1620 }
1621#endif /* E1K_WITH_TXD_CACHE */
1622#ifdef E1K_WITH_RXD_CACHE
1623 if (RT_LIKELY(e1kCsRxEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
1624 {
1625 pState->iRxDCurrent = pState->nRxDFetched = 0;
1626 e1kCsRxLeave(pState);
1627 }
1628#endif /* E1K_WITH_RXD_CACHE */
1629}
1630
1631#endif /* IN_RING3 */
1632
1633/**
1634 * Compute Internet checksum.
1635 *
1636 * @remarks Refer to http://www.netfor2.com/checksum.html for a short intro.
1637 *
1638 * @param pvBuf The buffer holding the data to checksum.
1639 * @param cb The size of the buffer in bytes.
1642 *
1643 * @return The 1's complement of the 1's complement sum.
1644 *
1645 * @thread E1000_TX
1646 */
1647static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1648{
1649 uint32_t csum = 0;
1650 uint16_t *pu16 = (uint16_t *)pvBuf;
1651
1652 while (cb > 1)
1653 {
1654 csum += *pu16++;
1655 cb -= 2;
1656 }
1657 if (cb)
1658 csum += *(uint8_t*)pu16;
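    /* Fold the 32-bit accumulator into 16 bits by adding the carry words back in. */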
1659 while (csum >> 16)
1660 csum = (csum >> 16) + (csum & 0xFFFF);
1661 return ~csum;
1662}
1663
1664/**
1665 * Dump a packet to debug log.
1666 *
1667 * @param pState The device state structure.
1668 * @param cpPacket The packet.
1669 * @param cb The size of the packet.
1670 * @param cszText A string denoting direction of packet transfer.
1671 * @thread E1000_TX
1672 */
1673DECLINLINE(void) e1kPacketDump(E1KSTATE* pState, const uint8_t *cpPacket, size_t cb, const char *cszText)
1674{
1675#ifdef DEBUG
1676 if (RT_LIKELY(e1kCsEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
1677 {
1678 E1kLog(("%s --- %s packet #%d: ---\n",
1679 INSTANCE(pState), cszText, ++pState->u32PktNo));
1680 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1681 e1kCsLeave(pState);
1682 }
1683#else
1684 if (RT_LIKELY(e1kCsEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
1685 {
1686 E1kLogRel(("E1000: %s packet #%d, seq=%x ack=%x\n", cszText, pState->u32PktNo++, ntohl(*(uint32_t*)(cpPacket+0x26)), ntohl(*(uint32_t*)(cpPacket+0x2A))));
1687 e1kCsLeave(pState);
1688 }
1689#endif
1690}
1691
1692/**
1693 * Determine the type of transmit descriptor.
1694 *
1695 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1696 *
1697 * @param pDesc Pointer to descriptor union.
1698 * @thread E1000_TX
1699 */
1700DECLINLINE(int) e1kGetDescType(E1KTXDESC* pDesc)
1701{
1702 if (pDesc->legacy.cmd.fDEXT)
1703 return pDesc->context.dw2.u4DTYP;
1704 return E1K_DTYP_LEGACY;
1705}
1706
1707/**
1708 * Dump receive descriptor to debug log.
1709 *
1710 * @param pState The device state structure.
1711 * @param pDesc Pointer to the descriptor.
1712 * @thread E1000_RX
1713 */
1714static void e1kPrintRDesc(E1KSTATE* pState, E1KRXDESC* pDesc)
1715{
1716 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", INSTANCE(pState), pDesc->u16Length));
1717 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1718 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1719 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1720 pDesc->status.fPIF ? "PIF" : "pif",
1721 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1722 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1723 pDesc->status.fVP ? "VP" : "vp",
1724 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1725 pDesc->status.fEOP ? "EOP" : "eop",
1726 pDesc->status.fDD ? "DD" : "dd",
1727 pDesc->status.fRXE ? "RXE" : "rxe",
1728 pDesc->status.fIPE ? "IPE" : "ipe",
1729 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1730 pDesc->status.fCE ? "CE" : "ce",
1731 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1732 E1K_SPEC_VLAN(pDesc->status.u16Special),
1733 E1K_SPEC_PRI(pDesc->status.u16Special)));
1734}
1735
1736/**
1737 * Dump transmit descriptor to debug log.
1738 *
1739 * @param pState The device state structure.
1740 * @param pDesc Pointer to descriptor union.
1741 * @param cszDir A string denoting direction of descriptor transfer
1742 * @thread E1000_TX
1743 */
1744static void e1kPrintTDesc(E1KSTATE* pState, E1KTXDESC* pDesc, const char* cszDir,
1745 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1746{
1747 /*
1748 * Unfortunately we cannot use our format handler here, as we want R0
1749 * logging as well.
1750 */
1751 switch (e1kGetDescType(pDesc))
1752 {
1753 case E1K_DTYP_CONTEXT:
1754 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
1755 INSTANCE(pState), cszDir, cszDir));
1756 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
1757 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
1758 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
1759 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
1760 pDesc->context.dw2.fIDE ? " IDE":"",
1761 pDesc->context.dw2.fRS ? " RS" :"",
1762 pDesc->context.dw2.fTSE ? " TSE":"",
1763 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
1764 pDesc->context.dw2.fTCP ? "TCP":"UDP",
1765 pDesc->context.dw2.u20PAYLEN,
1766 pDesc->context.dw3.u8HDRLEN,
1767 pDesc->context.dw3.u16MSS,
1768 pDesc->context.dw3.fDD?"DD":""));
1769 break;
1770 case E1K_DTYP_DATA:
1771 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
1772 INSTANCE(pState), cszDir, pDesc->data.cmd.u20DTALEN, cszDir));
1773 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1774 pDesc->data.u64BufAddr,
1775 pDesc->data.cmd.u20DTALEN));
1776 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
1777 pDesc->data.cmd.fIDE ? " IDE" :"",
1778 pDesc->data.cmd.fVLE ? " VLE" :"",
1779 pDesc->data.cmd.fRPS ? " RPS" :"",
1780 pDesc->data.cmd.fRS ? " RS" :"",
1781 pDesc->data.cmd.fTSE ? " TSE" :"",
1782 pDesc->data.cmd.fIFCS? " IFCS":"",
1783 pDesc->data.cmd.fEOP ? " EOP" :"",
1784 pDesc->data.dw3.fDD ? " DD" :"",
1785 pDesc->data.dw3.fEC ? " EC" :"",
1786 pDesc->data.dw3.fLC ? " LC" :"",
1787 pDesc->data.dw3.fTXSM? " TXSM":"",
1788 pDesc->data.dw3.fIXSM? " IXSM":"",
1789 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
1790 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
1791 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
1792 break;
1793 case E1K_DTYP_LEGACY:
1794 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
1795 INSTANCE(pState), cszDir, pDesc->legacy.cmd.u16Length, cszDir));
1796 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1797 pDesc->data.u64BufAddr,
1798 pDesc->legacy.cmd.u16Length));
1799 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
1800 pDesc->legacy.cmd.fIDE ? " IDE" :"",
1801 pDesc->legacy.cmd.fVLE ? " VLE" :"",
1802 pDesc->legacy.cmd.fRPS ? " RPS" :"",
1803 pDesc->legacy.cmd.fRS ? " RS" :"",
1804 pDesc->legacy.cmd.fIC ? " IC" :"",
1805 pDesc->legacy.cmd.fIFCS? " IFCS":"",
1806 pDesc->legacy.cmd.fEOP ? " EOP" :"",
1807 pDesc->legacy.dw3.fDD ? " DD" :"",
1808 pDesc->legacy.dw3.fEC ? " EC" :"",
1809 pDesc->legacy.dw3.fLC ? " LC" :"",
1810 pDesc->legacy.cmd.u8CSO,
1811 pDesc->legacy.dw3.u8CSS,
1812 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
1813 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
1814 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
1815 break;
1816 default:
1817 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
1818 INSTANCE(pState), cszDir, cszDir));
1819 break;
1820 }
1821}
1822
1823/**
1824 * Raise an interrupt if not masked.
1825 *
1826 * @param pState The device state structure.
1827 */
1828static int e1kRaiseInterrupt(E1KSTATE *pState, int rcBusy, uint32_t u32IntCause = 0)
1829{
1830 int rc = e1kCsEnter(pState, rcBusy);
1831 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1832 return rc;
1833
1834 E1K_INC_ISTAT_CNT(pState->uStatIntTry);
1835 ICR |= u32IntCause;
1836 if (ICR & IMS)
1837 {
1838#if 0
1839 if (pState->fDelayInts)
1840 {
1841 E1K_INC_ISTAT_CNT(pState->uStatIntDly);
1842 pState->iStatIntLostOne = 1;
1843 E1kLog2(("%s e1kRaiseInterrupt: Delayed. ICR=%08x\n",
1844 INSTANCE(pState), ICR));
1845#define E1K_LOST_IRQ_THRSLD 20
1846//#define E1K_LOST_IRQ_THRSLD 200000000
1847 if (pState->iStatIntLost >= E1K_LOST_IRQ_THRSLD)
1848 {
1849 E1kLog2(("%s WARNING! Disabling delayed interrupt logic: delayed=%d, delivered=%d\n",
1850 INSTANCE(pState), pState->uStatIntDly, pState->uStatIntLate));
1851 pState->fIntMaskUsed = false;
1852 pState->uStatDisDly++;
1853 }
1854 }
1855 else
1856#endif
1857 if (pState->fIntRaised)
1858 {
1859 E1K_INC_ISTAT_CNT(pState->uStatIntSkip);
1860 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
1861 INSTANCE(pState), ICR & IMS));
1862 }
1863 else
1864 {
1865#ifdef E1K_ITR_ENABLED
1866 uint64_t tstamp = TMTimerGet(pState->CTX_SUFF(pIntTimer));
1867 /* interrupts/sec = 1 / (256 * 10^-9 * ITR) */
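            /* E.g. ITR = 3906 throttles the rate to roughly 1 / (256e-9 * 3906) = ~1000 interrupts/sec (illustrative value). */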
1868 E1kLog2(("%s e1kRaiseInterrupt: tstamp - pState->u64AckedAt = %d, ITR * 256 = %d\n",
1869 INSTANCE(pState), (uint32_t)(tstamp - pState->u64AckedAt), ITR * 256));
1870 //if (!!ITR && pState->fIntMaskUsed && tstamp - pState->u64AckedAt < ITR * 256)
1871 if (!!ITR && tstamp - pState->u64AckedAt < ITR * 256 && !(ICR & ICR_RXT0))
1872 {
1873 E1K_INC_ISTAT_CNT(pState->uStatIntEarly);
1874 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
1875 INSTANCE(pState), (uint32_t)(tstamp - pState->u64AckedAt), ITR * 256));
1876 }
1877 else
1878#endif
1879 {
1880
1881 /* Since we are delivering the interrupt now
1882 * there is no need to do it later -- stop the timer.
1883 */
1884 TMTimerStop(pState->CTX_SUFF(pIntTimer));
1885 E1K_INC_ISTAT_CNT(pState->uStatInt);
1886 STAM_COUNTER_INC(&pState->StatIntsRaised);
1887 /* Got at least one unmasked interrupt cause */
1888 pState->fIntRaised = true;
1889 /* Raise(1) INTA(0) */
1890 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
1891 PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 1);
1892 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
1893 INSTANCE(pState), ICR & IMS));
1894 }
1895 }
1896 }
1897 else
1898 {
1899 E1K_INC_ISTAT_CNT(pState->uStatIntMasked);
1900 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
1901 INSTANCE(pState), ICR, IMS));
1902 }
1903 e1kCsLeave(pState);
1904 return VINF_SUCCESS;
1905}
1906
1907/**
1908 * Compute the physical address of the descriptor.
1909 *
1910 * @returns the physical address of the descriptor.
1911 *
1912 * @param baseHigh High-order 32 bits of descriptor table address.
1913 * @param baseLow Low-order 32 bits of descriptor table address.
1914 * @param idxDesc The descriptor index in the table.
1915 */
1916DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
1917{
1918 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
1919 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
1920}
1921
1922/**
1923 * Advance the head pointer of the receive descriptor queue.
1924 *
1925 * @remarks RDH always points to the next available RX descriptor.
1926 *
1927 * @param pState The device state structure.
1928 */
1929DECLINLINE(void) e1kAdvanceRDH(E1KSTATE *pState)
1930{
1931 Assert(e1kCsRxIsOwner(pState));
1932 //e1kCsEnter(pState, RT_SRC_POS);
1933 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
1934 RDH = 0;
1935 /*
1936 * Compute current receive queue length and fire RXDMT0 interrupt
1937 * if we are low on receive buffers
1938 */
1939 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
1940 /*
1941 * The minimum threshold is controlled by RDMTS bits of RCTL:
1942 * 00 = 1/2 of RDLEN
1943 * 01 = 1/4 of RDLEN
1944 * 10 = 1/8 of RDLEN
1945 * 11 = reserved
1946 */
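    /* Illustration: a 4 KiB ring holds 4096 / 16 = 256 descriptors; with RDMTS = 01b
     * the interrupt fires once 256 / (2 << 1) = 64 or fewer descriptors remain available. */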
1947 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
1948 if (uRQueueLen <= uMinRQThreshold)
1949 {
1950 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
1951 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
1952 INSTANCE(pState), RDH, RDT, uRQueueLen, uMinRQThreshold));
1953 E1K_INC_ISTAT_CNT(pState->uStatIntRXDMT0);
1954 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXDMT0);
1955 }
1956 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
1957 INSTANCE(pState), RDH, RDT, uRQueueLen));
1958 //e1kCsLeave(pState);
1959}
1960
1961#ifdef E1K_WITH_RXD_CACHE
1962/**
1963 * Return the number of RX descriptors that belong to the hardware.
1964 *
1965 * @returns the number of available descriptors in the RX ring.
1966 * @param pState The device state structure.
1967 * @thread ???
1968 */
1969DECLINLINE(uint32_t) e1kGetRxLen(E1KSTATE* pState)
1970{
1971 /*
1972 * Make sure RDT won't change during computation. EMT may modify RDT at
1973 * any moment.
1974 */
1975 uint32_t rdt = RDT;
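    /* Illustration: in an 8-descriptor ring with RDH = 6 and RDT = 2 the hardware
     * owns 8 + 2 - 6 = 4 descriptors; the count wraps past the end of the ring. */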
1976 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
1977}
1978
1979DECLINLINE(unsigned) e1kRxDInCache(E1KSTATE* pState)
1980{
1981 return pState->nRxDFetched > pState->iRxDCurrent ?
1982 pState->nRxDFetched - pState->iRxDCurrent : 0;
1983}
1984
1985DECLINLINE(unsigned) e1kRxDIsCacheEmpty(E1KSTATE* pState)
1986{
1987 return pState->iRxDCurrent >= pState->nRxDFetched;
1988}
1989
1990/**
1991 * Load receive descriptors from guest memory. The caller needs to hold the Rx
1992 * critical section.
1993 *
1994 * We need two physical reads in case the tail wrapped around the end of the RX
1995 * descriptor ring.
1996 *
1997 * @returns the actual number of descriptors fetched.
1998 * @param pState The device state structure.
2001 * @thread EMT, RX
2002 */
2003DECLINLINE(unsigned) e1kRxDPrefetch(E1KSTATE* pState)
2004{
2005 /* We've already loaded pState->nRxDFetched descriptors past RDH. */
2006 unsigned nDescsAvailable = e1kGetRxLen(pState) - e1kRxDInCache(pState);
2007 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pState->nRxDFetched);
2008 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
2009 Assert(nDescsTotal != 0);
2010 if (nDescsTotal == 0)
2011 return 0;
2012 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pState)) % nDescsTotal;
2013 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
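    /* Illustration: with a 16-descriptor ring, RDH = 14, RDT = 4 and an empty cache
     * there are 6 descriptors to fetch; the read below covers indices 14..15 and the
     * wrap-around read further down covers 0..3 (assuming the cache has room for all 6). */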
2014 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
2015 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
2016 INSTANCE(pState), nDescsAvailable, nDescsToFetch, nDescsTotal,
2017 nFirstNotLoaded, nDescsInSingleRead));
2018 if (nDescsToFetch == 0)
2019 return 0;
2020 E1KRXDESC* pFirstEmptyDesc = &pState->aRxDescriptors[pState->nRxDFetched];
2021 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
2022 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
2023 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
2024 // uint64_t addrBase = ((uint64_t)RDBAH << 32) + RDBAL;
2025 // unsigned i, j;
2026 // for (i = pState->nRxDFetched; i < pState->nRxDFetched + nDescsInSingleRead; ++i)
2027 // {
2028 // pState->aRxDescAddr[i] = addrBase + (nFirstNotLoaded + i - pState->nRxDFetched) * sizeof(E1KRXDESC);
2029 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", INSTANCE(pState), i, pState->aRxDescAddr[i]));
2030 // }
2031 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
2032 INSTANCE(pState), nDescsInSingleRead,
2033 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
2034 nFirstNotLoaded, RDLEN, RDH, RDT));
2035 if (nDescsToFetch > nDescsInSingleRead)
2036 {
2037 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
2038 ((uint64_t)RDBAH << 32) + RDBAL,
2039 pFirstEmptyDesc + nDescsInSingleRead,
2040 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
2041 // Assert(i == pState->nRxDFetched + nDescsInSingleRead);
2042 // for (j = 0; i < pState->nRxDFetched + nDescsToFetch; ++i, ++j)
2043 // {
2044 // pState->aRxDescAddr[i] = addrBase + j * sizeof(E1KRXDESC);
2045 // E1kLog3(("%s aRxDescAddr[%d] = %p\n", INSTANCE(pState), i, pState->aRxDescAddr[i]));
2046 // }
2047 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
2048 INSTANCE(pState), nDescsToFetch - nDescsInSingleRead,
2049 RDBAH, RDBAL));
2050 }
2051 pState->nRxDFetched += nDescsToFetch;
2052 return nDescsToFetch;
2053}
2054
2055/**
2056 * Obtain the next RX descriptor from RXD cache, fetching descriptors from the
2057 * RX ring if the cache is empty.
2058 *
2059 * Note that we cannot advance the cache pointer (iRxDCurrent) yet, as it would
2060 * go out of sync with RDH, which would cause trouble when EMT checks whether the
2061 * cache is empty before doing a pre-fetch (see @bugref{6217}).
2062 *
2063 * @param pState The device state structure.
2064 * @thread RX
2065 */
2066DECLINLINE(E1KRXDESC*) e1kRxDGet(E1KSTATE* pState)
2067{
2068 Assert(e1kCsRxIsOwner(pState));
2069 /* Check the cache first. */
2070 if (pState->iRxDCurrent < pState->nRxDFetched)
2071 return &pState->aRxDescriptors[pState->iRxDCurrent];
2072 /* Cache is empty, reset it and check if we can fetch more. */
2073 pState->iRxDCurrent = pState->nRxDFetched = 0;
2074 if (e1kRxDPrefetch(pState))
2075 return &pState->aRxDescriptors[pState->iRxDCurrent];
2076 /* Out of Rx descriptors. */
2077 return NULL;
2078}
2079
2080/**
2081 * Return the RX descriptor obtained with e1kRxDGet() and advance the cache
2082 * pointer. The descriptor gets written back to the RXD ring.
2083 *
2084 * @param pState The device state structure.
2085 * @param pDesc The descriptor being "returned" to the RX ring.
2086 * @thread RX
2087 */
2088DECLINLINE(void) e1kRxDPut(E1KSTATE* pState, E1KRXDESC* pDesc)
2089{
2090 Assert(e1kCsRxIsOwner(pState));
2091 pState->iRxDCurrent++;
2092 // Assert(pDesc >= pState->aRxDescriptors);
2093 // Assert(pDesc < pState->aRxDescriptors + E1K_RXD_CACHE_SIZE);
2094 // uint64_t addr = e1kDescAddr(RDBAH, RDBAL, RDH);
2095 // uint32_t rdh = RDH;
2096 // Assert(pState->aRxDescAddr[pDesc - pState->aRxDescriptors] == addr);
2097 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns),
2098 e1kDescAddr(RDBAH, RDBAL, RDH),
2099 pDesc, sizeof(E1KRXDESC));
2100 e1kAdvanceRDH(pState);
2101 e1kPrintRDesc(pState, pDesc);
2102}
2103
2104/**
2105 * Store a fragment of the received packet at the specified address.
2106 *
2107 * @param pState The device state structure.
2108 * @param pDesc The next available RX descriptor.
2109 * @param pvBuf The fragment.
2110 * @param cb The size of the fragment.
2111 */
2112static DECLCALLBACK(void) e1kStoreRxFragment(E1KSTATE *pState, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2113{
2114 STAM_PROFILE_ADV_START(&pState->StatReceiveStore, a);
2115 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2116 INSTANCE(pState), cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2117 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2118 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2119 STAM_PROFILE_ADV_STOP(&pState->StatReceiveStore, a);
2120}
2121
2122#else /* !E1K_WITH_RXD_CACHE */
2123
2124/**
2125 * Store a fragment of the received packet that fits into the next available RX
2126 * buffer.
2127 *
2128 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2129 *
2130 * @param pState The device state structure.
2131 * @param pDesc The next available RX descriptor.
2132 * @param pvBuf The fragment.
2133 * @param cb The size of the fragment.
2134 */
2135static DECLCALLBACK(void) e1kStoreRxFragment(E1KSTATE *pState, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2136{
2137 STAM_PROFILE_ADV_START(&pState->StatReceiveStore, a);
2138 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pState->szInstance, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2139 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2140 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2141 /* Write back the descriptor */
2142 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2143 e1kPrintRDesc(pState, pDesc);
2144 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2145 /* Advance head */
2146 e1kAdvanceRDH(pState);
2147 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", INSTANCE(pState), pDesc->fEOP, RDTR, RADV));
2148 if (pDesc->status.fEOP)
2149 {
2150 /* Complete packet has been stored -- it is time to let the guest know. */
2151#ifdef E1K_USE_RX_TIMERS
2152 if (RDTR)
2153 {
2154 /* Arm the timer to fire in RDTR usec (discard .024) */
2155 e1kArmTimer(pState, pState->CTX_SUFF(pRIDTimer), RDTR);
2156 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2157 if (RADV != 0 && !TMTimerIsActive(pState->CTX_SUFF(pRADTimer)))
2158 e1kArmTimer(pState, pState->CTX_SUFF(pRADTimer), RADV);
2159 }
2160 else
2161 {
2162#endif
2163 /* 0 delay means immediate interrupt */
2164 E1K_INC_ISTAT_CNT(pState->uStatIntRx);
2165 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXT0);
2166#ifdef E1K_USE_RX_TIMERS
2167 }
2168#endif
2169 }
2170 STAM_PROFILE_ADV_STOP(&pState->StatReceiveStore, a);
2171}
2172#endif /* !E1K_WITH_RXD_CACHE */
2173
2174/**
2175 * Returns true if it is a broadcast packet.
2176 *
2177 * @returns true if destination address indicates broadcast.
2178 * @param pvBuf The ethernet packet.
2179 */
2180DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2181{
2182 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2183 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2184}
2185
2186/**
2187 * Returns true if it is a multicast packet.
2188 *
2189 * @remarks returns true for broadcast packets as well.
2190 * @returns true if destination address indicates multicast.
2191 * @param pvBuf The ethernet packet.
2192 */
2193DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2194{
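    /* The multicast (I/G) bit is the least significant bit of the first byte of the destination MAC address. */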
2195 return (*(char*)pvBuf) & 1;
2196}
2197
2198/**
2199 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2200 *
2201 * @remarks We emulate checksum offloading for major packet types only.
2202 *
2203 * @returns VBox status code.
2204 * @param pState The device state structure.
2205 * @param pFrame The available data.
2206 * @param cb Number of bytes available in the buffer.
2207 * @param pStatus Bit fields containing status info.
2208 */
2209static int e1kRxChecksumOffload(E1KSTATE* pState, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2210{
2211 /** @todo
2212 * It is not safe to bypass checksum verification for packets coming
2213 * from the real wire. We are currently unable to tell where packets
2214 * come from, so we tell the driver to ignore our checksum flags
2215 * and do the verification in software.
2216 */
2217#if 0
2218 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2219
2220 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", INSTANCE(pState), uEtherType));
2221
2222 switch (uEtherType)
2223 {
2224 case 0x800: /* IPv4 */
2225 {
2226 pStatus->fIXSM = false;
2227 pStatus->fIPCS = true;
2228 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2229 /* TCP/UDP checksum offloading works with TCP and UDP only */
2230 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2231 break;
2232 }
2233 case 0x86DD: /* IPv6 */
2234 pStatus->fIXSM = false;
2235 pStatus->fIPCS = false;
2236 pStatus->fTCPCS = true;
2237 break;
2238 default: /* ARP, VLAN, etc. */
2239 pStatus->fIXSM = true;
2240 break;
2241 }
2242#else
2243 pStatus->fIXSM = true;
2244#endif
2245 return VINF_SUCCESS;
2246}
2247
2248/**
2249 * Pad and store received packet.
2250 *
2251 * @remarks Make sure that the packet appears to the upper layer as one coming
2252 * from real Ethernet hardware: pad it and insert the FCS.
2253 *
2254 * @returns VBox status code.
2255 * @param pState The device state structure.
2256 * @param pvBuf The available data.
2257 * @param cb Number of bytes available in the buffer.
2258 * @param status Bit fields containing status info.
2259 */
2260static int e1kHandleRxPacket(E1KSTATE* pState, const void *pvBuf, size_t cb, E1KRXDST status)
2261{
2262#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2263 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2264 uint8_t *ptr = rxPacket;
2265
2266 int rc = e1kCsRxEnter(pState, VERR_SEM_BUSY);
2267 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2268 return rc;
2269
2270 if (cb > 70) /* unqualified guess */
2271 pState->led.Asserted.s.fReading = pState->led.Actual.s.fReading = 1;
2272
2273 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2274 Assert(cb > 16);
2275 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2276 E1kLog3(("%s Max RX packet size is %u\n", INSTANCE(pState), cbMax));
2277 if (status.fVP)
2278 {
2279 /* VLAN packet -- strip VLAN tag in VLAN mode */
2280 if ((CTRL & CTRL_VME) && cb > 16)
2281 {
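            /* Layout of a tagged frame: dst(6) + src(6) + TPID 0x8100 (bytes 12..13) + TCI (bytes 14..15) + type/len...
             * The 4-byte 802.1Q header is stripped below; the TCI (u16Ptr[7]) is preserved in the status field. */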
2282 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2283 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2284 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2285 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2286 cb -= 4;
2287 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2288 INSTANCE(pState), status.u16Special, cb));
2289 }
2290 else
2291 status.fVP = false; /* Set VP only if we stripped the tag */
2292 }
2293 else
2294 memcpy(rxPacket, pvBuf, cb);
2295 /* Pad short packets */
2296 if (cb < 60)
2297 {
2298 memset(rxPacket + cb, 0, 60 - cb);
2299 cb = 60;
2300 }
2301 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2302 {
2303 STAM_PROFILE_ADV_START(&pState->StatReceiveCRC, a);
2304 /*
2305 * Add FCS if CRC stripping is not enabled. Since the CRC value
2306 * is ignored by most drivers, we may as well save ourselves the trouble
2307 * of calculating it (see the EthernetCRC CFGM parameter).
2308 */
2309 if (pState->fEthernetCRC)
2310 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2311 cb += sizeof(uint32_t);
2312 STAM_PROFILE_ADV_STOP(&pState->StatReceiveCRC, a);
2313 E1kLog3(("%s Added FCS (cb=%u)\n", INSTANCE(pState), cb));
2314 }
2315 /* Compute checksum of complete packet */
2316 uint16_t checksum = e1kCSum16(rxPacket + GET_BITS(RXCSUM, PCSS), cb);
2317 e1kRxChecksumOffload(pState, rxPacket, cb, &status);
2318
2319 /* Update stats */
2320 E1K_INC_CNT32(GPRC);
2321 if (e1kIsBroadcast(pvBuf))
2322 E1K_INC_CNT32(BPRC);
2323 else if (e1kIsMulticast(pvBuf))
2324 E1K_INC_CNT32(MPRC);
2325 /* Update octet receive counter */
2326 E1K_ADD_CNT64(GORCL, GORCH, cb);
2327 STAM_REL_COUNTER_ADD(&pState->StatReceiveBytes, cb);
2328 if (cb == 64)
2329 E1K_INC_CNT32(PRC64);
2330 else if (cb < 128)
2331 E1K_INC_CNT32(PRC127);
2332 else if (cb < 256)
2333 E1K_INC_CNT32(PRC255);
2334 else if (cb < 512)
2335 E1K_INC_CNT32(PRC511);
2336 else if (cb < 1024)
2337 E1K_INC_CNT32(PRC1023);
2338 else
2339 E1K_INC_CNT32(PRC1522);
2340
2341 E1K_INC_ISTAT_CNT(pState->uStatRxFrm);
2342
2343#ifdef E1K_WITH_RXD_CACHE
2344 while (cb > 0)
2345 {
2346 E1KRXDESC *pDesc = e1kRxDGet(pState);
2347
2348 if (pDesc == NULL)
2349 {
2350 E1kLog(("%s Out of receive buffers, dropping the packet "
2351 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2352 INSTANCE(pState), cb, e1kRxDInCache(pState), RDH, RDT));
2353 break;
2354 }
2355#else /* !E1K_WITH_RXD_CACHE */
2356 if (RDH == RDT)
2357 {
2358 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2359 INSTANCE(pState)));
2360 }
2361 /* Store the packet to receive buffers */
2362 while (RDH != RDT)
2363 {
2364 /* Load the descriptor pointed by head */
2365 E1KRXDESC desc, *pDesc = &desc;
2366 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
2367 &desc, sizeof(desc));
2368#endif /* !E1K_WITH_RXD_CACHE */
2369 if (pDesc->u64BufAddr)
2370 {
2371 /* Update descriptor */
2372 pDesc->status = status;
2373 pDesc->u16Checksum = checksum;
2374 pDesc->status.fDD = true;
2375
2376 /*
2377 * We need to leave the Rx critical section here or we risk deadlocking
2378 * with EMT in e1kRegWriteRDT when the write targets an unallocated
2379 * page or a page that has an access handler associated with it.
2380 * Note that it is safe to leave the critical section here since
2381 * e1kRegWriteRDT() never modifies RDH. It never touches already
2382 * fetched RxD cache entries either.
2383 */
2384 if (cb > pState->u16RxBSize)
2385 {
2386 pDesc->status.fEOP = false;
2387 e1kCsRxLeave(pState);
2388 e1kStoreRxFragment(pState, pDesc, ptr, pState->u16RxBSize);
2389 rc = e1kCsRxEnter(pState, VERR_SEM_BUSY);
2390 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2391 return rc;
2392 ptr += pState->u16RxBSize;
2393 cb -= pState->u16RxBSize;
2394 }
2395 else
2396 {
2397 pDesc->status.fEOP = true;
2398 e1kCsRxLeave(pState);
2399 e1kStoreRxFragment(pState, pDesc, ptr, cb);
2400#ifdef E1K_WITH_RXD_CACHE
2401 rc = e1kCsRxEnter(pState, VERR_SEM_BUSY);
2402 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2403 return rc;
2404 cb = 0;
2405#else /* !E1K_WITH_RXD_CACHE */
2406 pState->led.Actual.s.fReading = 0;
2407 return VINF_SUCCESS;
2408#endif /* !E1K_WITH_RXD_CACHE */
2409 }
2410 /*
2411 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2412 * is not defined.
2413 */
2414 }
2415#ifndef E1K_WITH_RXD_CACHE
2416 else
2417 {
2418#endif /* !E1K_WITH_RXD_CACHE */
2419 /* Write back the descriptor. */
2420 pDesc->status.fDD = true;
2421 e1kRxDPut(pState, pDesc);
2422#ifndef E1K_WITH_RXD_CACHE
2423 }
2424#endif /* !E1K_WITH_RXD_CACHE */
2425 }
2426
2427 if (cb > 0)
2428 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", INSTANCE(pState), cb));
2429
2430 pState->led.Actual.s.fReading = 0;
2431
2432 e1kCsRxLeave(pState);
2433#ifdef E1K_WITH_RXD_CACHE
2434 /* Complete packet has been stored -- it is time to let the guest know. */
2435# ifdef E1K_USE_RX_TIMERS
2436 if (RDTR)
2437 {
2438 /* Arm the timer to fire in RDTR usec (discard .024) */
2439 e1kArmTimer(pState, pState->CTX_SUFF(pRIDTimer), RDTR);
2440 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2441 if (RADV != 0 && !TMTimerIsActive(pState->CTX_SUFF(pRADTimer)))
2442 e1kArmTimer(pState, pState->CTX_SUFF(pRADTimer), RADV);
2443 }
2444 else
2445 {
2446# endif /* E1K_USE_RX_TIMERS */
2447 /* 0 delay means immediate interrupt */
2448 E1K_INC_ISTAT_CNT(pState->uStatIntRx);
2449 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXT0);
2450# ifdef E1K_USE_RX_TIMERS
2451 }
2452# endif /* E1K_USE_RX_TIMERS */
2453#endif /* E1K_WITH_RXD_CACHE */
2454
2455 return VINF_SUCCESS;
2456#else
2457 return VERR_INTERNAL_ERROR_2;
2458#endif
2459}
2460
2461
2462/**
2463 * Bring the link up after the configured delay, 5 seconds by default.
2464 *
2465 * @param pState The device state structure.
2466 * @thread any
2467 */
2468DECLINLINE(void) e1kBringLinkUpDelayed(E1KSTATE* pState)
2469{
2470 E1kLog(("%s Will bring up the link in %d seconds...\n",
2471 INSTANCE(pState), pState->cMsLinkUpDelay / 1000));
2472 e1kArmTimer(pState, pState->CTX_SUFF(pLUTimer), pState->cMsLinkUpDelay * 1000);
2473}
2474
2475#if 0 /* unused */
2476/**
2477 * Read handler for Device Control register.
2478 *
2479 * Reflects the state of the MDIO pin as driven by the PHY.
2480 *
2481 * @returns VBox status code.
2482 *
2483 * @param pState The device state structure.
2484 * @param offset Register offset in memory-mapped frame.
2485 * @param index Register index in register array.
2486 * @param mask Used to implement partial reads (8 and 16-bit).
2487 */
2488static int e1kRegReadCTRL(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2489{
2490 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2491 INSTANCE(pState), (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2492 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2493 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2494 {
2495 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2496 if (Phy::readMDIO(&pState->phy))
2497 *pu32Value = CTRL | CTRL_MDIO;
2498 else
2499 *pu32Value = CTRL & ~CTRL_MDIO;
2500 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2501 INSTANCE(pState), !!(*pu32Value & CTRL_MDIO)));
2502 }
2503 else
2504 {
2505 /* MDIO pin is used for output, ignore it */
2506 *pu32Value = CTRL;
2507 }
2508 return VINF_SUCCESS;
2509}
2510#endif /* unused */
2511
2512/**
2513 * Write handler for Device Control register.
2514 *
2515 * Handles reset.
2516 *
2517 * @param pState The device state structure.
2518 * @param offset Register offset in memory-mapped frame.
2519 * @param index Register index in register array.
2520 * @param value The value to store.
2521 * @param mask Used to implement partial writes (8 and 16-bit).
2522 * @thread EMT
2523 */
2524static int e1kRegWriteCTRL(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2525{
2526 int rc = VINF_SUCCESS;
2527
2528 if (value & CTRL_RESET)
2529 { /* RST */
2530#ifndef IN_RING3
2531 return VINF_IOM_R3_IOPORT_WRITE;
2532#else
2533 e1kHardReset(pState);
2534#endif
2535 }
2536 else
2537 {
2538 if ( (value & CTRL_SLU)
2539 && pState->fCableConnected
2540 && !(STATUS & STATUS_LU))
2541 {
2542 /* The driver indicates that we should bring up the link */
2543 /* Do so in 5 seconds (by default). */
2544 e1kBringLinkUpDelayed(pState);
2545 /*
2546 * Change the status (but not PHY status) anyway as Windows expects
2547 * it for 82543GC.
2548 */
2549 STATUS |= STATUS_LU;
2550 }
2551 if (value & CTRL_VME)
2552 {
2553 E1kLog(("%s VLAN Mode Enabled\n", INSTANCE(pState)));
2554 }
2555 E1kLog(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2556 INSTANCE(pState), (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2557 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2558 if (value & CTRL_MDC)
2559 {
2560 if (value & CTRL_MDIO_DIR)
2561 {
2562 E1kLog(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", INSTANCE(pState), !!(value & CTRL_MDIO)));
2563 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2564 Phy::writeMDIO(&pState->phy, !!(value & CTRL_MDIO));
2565 }
2566 else
2567 {
2568 if (Phy::readMDIO(&pState->phy))
2569 value |= CTRL_MDIO;
2570 else
2571 value &= ~CTRL_MDIO;
2572 E1kLog(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n",
2573 INSTANCE(pState), !!(value & CTRL_MDIO)));
2574 }
2575 }
2576 rc = e1kRegWriteDefault(pState, offset, index, value);
2577 }
2578
2579 return rc;
2580}
2581
2582/**
2583 * Write handler for EEPROM/Flash Control/Data register.
2584 *
2585 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2586 *
2587 * @param pState The device state structure.
2588 * @param offset Register offset in memory-mapped frame.
2589 * @param index Register index in register array.
2590 * @param value The value to store.
2591 * @param mask Used to implement partial writes (8 and 16-bit).
2592 * @thread EMT
2593 */
2594static int e1kRegWriteEECD(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2595{
2596#ifdef IN_RING3
2597 /* So far we are concerned with lower byte only */
2598 if ((EECD & EECD_EE_GNT) || pState->eChip == E1K_CHIP_82543GC)
2599 {
2600 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2601 /* Note: 82543GC does not need to request EEPROM access */
2602 STAM_PROFILE_ADV_START(&pState->StatEEPROMWrite, a);
2603 pState->eeprom.write(value & EECD_EE_WIRES);
2604 STAM_PROFILE_ADV_STOP(&pState->StatEEPROMWrite, a);
2605 }
2606 if (value & EECD_EE_REQ)
2607 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2608 else
2609 EECD &= ~EECD_EE_GNT;
2610 //e1kRegWriteDefault(pState, offset, index, value );
2611
2612 return VINF_SUCCESS;
2613#else /* !IN_RING3 */
2614 return VINF_IOM_R3_MMIO_WRITE;
2615#endif /* !IN_RING3 */
2616}
2617
2618/**
2619 * Read handler for EEPROM/Flash Control/Data register.
2620 *
2621 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2622 *
2623 * @returns VBox status code.
2624 *
2625 * @param pState The device state structure.
2626 * @param offset Register offset in memory-mapped frame.
2627 * @param index Register index in register array.
2628 * @param mask Used to implement partial reads (8 and 16-bit).
2629 * @thread EMT
2630 */
2631static int e1kRegReadEECD(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2632{
2633#ifdef IN_RING3
2634 uint32_t value;
2635 int rc = e1kRegReadDefault(pState, offset, index, &value);
2636 if (RT_SUCCESS(rc))
2637 {
2638 if ((value & EECD_EE_GNT) || pState->eChip == E1K_CHIP_82543GC)
2639 {
2640 /* Note: 82543GC does not need to request EEPROM access */
2641 /* Access to EEPROM granted -- read 4-wire bits from the EEPROM device */
2642 STAM_PROFILE_ADV_START(&pState->StatEEPROMRead, a);
2643 value |= pState->eeprom.read();
2644 STAM_PROFILE_ADV_STOP(&pState->StatEEPROMRead, a);
2645 }
2646 *pu32Value = value;
2647 }
2648
2649 return rc;
2650#else /* !IN_RING3 */
2651 return VINF_IOM_R3_MMIO_READ;
2652#endif /* !IN_RING3 */
2653}
2654
2655/**
2656 * Write handler for EEPROM Read register.
2657 *
2658 * Handles EEPROM word access requests, reads EEPROM and stores the result
2659 * into DATA field.
2660 *
2661 * @param pState The device state structure.
2662 * @param offset Register offset in memory-mapped frame.
2663 * @param index Register index in register array.
2664 * @param value The value to store.
2665 * @param mask Used to implement partial writes (8 and 16-bit).
2666 * @thread EMT
2667 */
2668static int e1kRegWriteEERD(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2669{
2670#ifdef IN_RING3
2671 /* Make use of 'writable' and 'readable' masks. */
2672 e1kRegWriteDefault(pState, offset, index, value);
2673 /* DONE and DATA are set only if read was triggered by START. */
2674 if (value & EERD_START)
2675 {
2676 uint16_t tmp;
2677 STAM_PROFILE_ADV_START(&pState->StatEEPROMRead, a);
2678 if (pState->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2679 SET_BITS(EERD, DATA, tmp);
2680 EERD |= EERD_DONE;
2681 STAM_PROFILE_ADV_STOP(&pState->StatEEPROMRead, a);
2682 }
2683
2684 return VINF_SUCCESS;
2685#else /* !IN_RING3 */
2686 return VINF_IOM_R3_MMIO_WRITE;
2687#endif /* !IN_RING3 */
2688}
2689
2690
2691/**
2692 * Write handler for MDI Control register.
2693 *
2694 * Handles PHY read/write requests; forwards requests to internal PHY device.
2695 *
2696 * @param pState The device state structure.
2697 * @param offset Register offset in memory-mapped frame.
2698 * @param index Register index in register array.
2699 * @param value The value to store.
2700 * @param mask Used to implement partial writes (8 and 16-bit).
2701 * @thread EMT
2702 */
2703static int e1kRegWriteMDIC(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2704{
2705 if (value & MDIC_INT_EN)
2706 {
2707 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
2708 INSTANCE(pState)));
2709 }
2710 else if (value & MDIC_READY)
2711 {
2712 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
2713 INSTANCE(pState)));
2714 }
2715 else if (GET_BITS_V(value, MDIC, PHY) != 1)
2716 {
2717 E1kLog(("%s ERROR! Access to invalid PHY detected, phy=%d.\n",
2718 INSTANCE(pState), GET_BITS_V(value, MDIC, PHY)));
2719 }
2720 else
2721 {
2722 /* Store the value */
2723 e1kRegWriteDefault(pState, offset, index, value);
2724 STAM_COUNTER_INC(&pState->StatPHYAccesses);
2725 /* Forward op to PHY */
2726 if (value & MDIC_OP_READ)
2727 SET_BITS(MDIC, DATA, Phy::readRegister(&pState->phy, GET_BITS_V(value, MDIC, REG)));
2728 else
2729 Phy::writeRegister(&pState->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK);
2730 /* Let software know that we are done */
2731 MDIC |= MDIC_READY;
2732 }
2733
2734 return VINF_SUCCESS;
2735}
2736
2737/**
2738 * Write handler for Interrupt Cause Read register.
2739 *
2740 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
2741 *
2742 * @param pState The device state structure.
2743 * @param offset Register offset in memory-mapped frame.
2744 * @param index Register index in register array.
2745 * @param value The value to store.
2746 * @param mask Used to implement partial writes (8 and 16-bit).
2747 * @thread EMT
2748 */
2749static int e1kRegWriteICR(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2750{
2751 ICR &= ~value;
2752
2753 return VINF_SUCCESS;
2754}
2755
2756/**
2757 * Read handler for Interrupt Cause Read register.
2758 *
2759 * Reading this register acknowledges all interrupts.
2760 *
2761 * @returns VBox status code.
2762 *
2763 * @param pState The device state structure.
2764 * @param offset Register offset in memory-mapped frame.
2765 * @param index Register index in register array.
2766 * @param mask Not used.
2767 * @thread EMT
2768 */
2769static int e1kRegReadICR(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2770{
2771 int rc = e1kCsEnter(pState, VINF_IOM_R3_MMIO_READ);
2772 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2773 return rc;
2774
2775 uint32_t value = 0;
2776 rc = e1kRegReadDefault(pState, offset, index, &value);
2777 if (RT_SUCCESS(rc))
2778 {
2779 if (value)
2780 {
2781 /*
2782 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
2783 * with disabled interrupts.
2784 */
2785 //if (IMS)
2786 if (1)
2787 {
2788 /*
2789 * Interrupts were enabled -- we are supposedly at the very
2790 * beginning of the interrupt handler
2791 */
2792 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
2793 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", INSTANCE(pState), ICR));
2794 /* Clear all pending interrupts */
2795 ICR = 0;
2796 pState->fIntRaised = false;
2797 /* Lower(0) INTA(0) */
2798 PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 0);
2799
2800 pState->u64AckedAt = TMTimerGet(pState->CTX_SUFF(pIntTimer));
2801 if (pState->fIntMaskUsed)
2802 pState->fDelayInts = true;
2803 }
2804 else
2805 {
2806 /*
2807 * Interrupts are disabled -- in Windows guests the ICR read is done
2808 * just before re-enabling interrupts
2809 */
2810 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", INSTANCE(pState), ICR));
2811 }
2812 }
2813 *pu32Value = value;
2814 }
2815 e1kCsLeave(pState);
2816
2817 return rc;
2818}
2819
2820/**
2821 * Write handler for Interrupt Cause Set register.
2822 *
2823 * Bits corresponding to 1s in 'value' will be set in ICR register.
2824 *
2825 * @param pState The device state structure.
2826 * @param offset Register offset in memory-mapped frame.
2827 * @param index Register index in register array.
2828 * @param value The value to store.
2829 * @param mask Used to implement partial writes (8 and 16-bit).
2830 * @thread EMT
2831 */
2832static int e1kRegWriteICS(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2833{
2834 E1K_INC_ISTAT_CNT(pState->uStatIntICS);
2835 return e1kRaiseInterrupt(pState, VINF_IOM_R3_MMIO_WRITE, value & s_e1kRegMap[ICS_IDX].writable);
2836}
2837
2838/**
2839 * Write handler for Interrupt Mask Set register.
2840 *
2841 * Will trigger pending interrupts.
2842 *
2843 * @param pState The device state structure.
2844 * @param offset Register offset in memory-mapped frame.
2845 * @param index Register index in register array.
2846 * @param value The value to store.
2847 * @param mask Used to implement partial writes (8 and 16-bit).
2848 * @thread EMT
2849 */
2850static int e1kRegWriteIMS(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2851{
2852 IMS |= value;
2853 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
2854 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", INSTANCE(pState)));
2855 /* The mask has changed; we need to raise any pending interrupts. */
2856 if ((ICR & IMS) && !pState->fLocked)
2857 {
2858 E1kLog2(("%s e1kRegWriteIMS: IRQ pending (%08x), arming late int timer...\n",
2859 INSTANCE(pState), ICR));
2860 /* Raising an interrupt immediately causes win7 to hang upon NIC reconfiguration, see @bugref{5023}. */
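        /* Instead, arm the interrupt timer to deliver the pending interrupt ITR * 256 ns from now. */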
2861 TMTimerSet(pState->CTX_SUFF(pIntTimer), TMTimerFromNano(pState->CTX_SUFF(pIntTimer), ITR * 256) +
2862 TMTimerGet(pState->CTX_SUFF(pIntTimer)));
2863 }
2864
2865 return VINF_SUCCESS;
2866}
2867
2868/**
2869 * Write handler for Interrupt Mask Clear register.
2870 *
2871 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
2872 *
2873 * @param pState The device state structure.
2874 * @param offset Register offset in memory-mapped frame.
2875 * @param index Register index in register array.
2876 * @param value The value to store.
2877 * @param mask Used to implement partial writes (8 and 16-bit).
2878 * @thread EMT
2879 */
2880static int e1kRegWriteIMC(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2881{
2882 int rc = e1kCsEnter(pState, VINF_IOM_R3_MMIO_WRITE);
2883 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2884 return rc;
2885 if (pState->fIntRaised)
2886 {
2887 /*
2888 * Technically we should reset fIntRaised in the ICR read handler, but that would cause
2889 * Windows to freeze, since it may receive an interrupt while still at the very beginning
2890 * of its interrupt handler.
2891 */
2892 E1K_INC_ISTAT_CNT(pState->uStatIntLower);
2893 STAM_COUNTER_INC(&pState->StatIntsPrevented);
2894 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
2895 /* Lower(0) INTA(0) */
2896 PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 0);
2897 pState->fIntRaised = false;
2898 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", INSTANCE(pState), ICR));
2899 }
2900 IMS &= ~value;
2901 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", INSTANCE(pState)));
2902 e1kCsLeave(pState);
2903
2904 return VINF_SUCCESS;
2905}
2906
2907/**
2908 * Write handler for Receive Control register.
2909 *
2910 * @param pState The device state structure.
2911 * @param offset Register offset in memory-mapped frame.
2912 * @param index Register index in register array.
2913 * @param value The value to store.
2914 * @param mask Used to implement partial writes (8 and 16-bit).
2915 * @thread EMT
2916 */
2917static int e1kRegWriteRCTL(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2918{
2919 /* Update promiscuous mode */
2920 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
2921 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
2922 {
2923 /* Promiscuity has changed, pass the knowledge on. */
2924#ifndef IN_RING3
2925 return VINF_IOM_R3_IOPORT_WRITE;
2926#else
2927 if (pState->pDrvR3)
2928 pState->pDrvR3->pfnSetPromiscuousMode(pState->pDrvR3, fBecomePromiscous);
2929#endif
2930 }
2931
2932 /* Adjust receive buffer size */
2933 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
2934 if (value & RCTL_BSEX)
2935 cbRxBuf *= 16;
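    /*
     * Illustrative sizes resulting from the computation above (assuming the
     * standard BSIZE/BSEX encoding): BSIZE=00b -> 2048, 01b -> 1024, 10b -> 512,
     * 11b -> 256 bytes; with BSEX set the last three become 16384, 8192 and
     * 4096 bytes (BSEX together with BSIZE=00b is a reserved combination).
     */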
2936 if (cbRxBuf != pState->u16RxBSize)
2937 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
2938 INSTANCE(pState), cbRxBuf, pState->u16RxBSize));
2939 pState->u16RxBSize = cbRxBuf;
2940
2941 /* Update the register */
2942 e1kRegWriteDefault(pState, offset, index, value);
2943
2944 return VINF_SUCCESS;
2945}
2946
2947/**
2948 * Write handler for Packet Buffer Allocation register.
2949 *
2950 * TXA = 64 - RXA.
2951 *
2952 * @param pState The device state structure.
2953 * @param offset Register offset in memory-mapped frame.
2954 * @param index Register index in register array.
2955 * @param value The value to store.
2957 * @thread EMT
2958 */
2959static int e1kRegWritePBA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2960{
2961 e1kRegWriteDefault(pState, offset, index, value);
2962 PBA_st->txa = 64 - PBA_st->rxa;
2963
2964 return VINF_SUCCESS;
2965}
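/*
 * Illustrative example for e1kRegWritePBA(): the PBA register partitions the
 * 64 KB on-chip packet buffer between receive and transmit, so writing RXA = 48
 * leaves TXA = 64 - 48 = 16, i.e. 48 KB for received packets and 16 KB for
 * packets being transmitted (units are kilobytes per the 8254x documentation).
 */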
2966
2967/**
2968 * Write handler for Receive Descriptor Tail register.
2969 *
2970 * @remarks A write to RDT forces a switch to the host context (R3) and signals
2971 * e1kNetworkDown_WaitReceiveAvail().
2972 *
2973 * @returns VBox status code.
2974 *
2975 * @param pState The device state structure.
2976 * @param offset Register offset in memory-mapped frame.
2977 * @param index Register index in register array.
2978 * @param value The value to store.
2980 * @thread EMT
2981 */
2982static int e1kRegWriteRDT(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2983{
2984#ifndef IN_RING3
2985 /* XXX */
2986// return VINF_IOM_R3_MMIO_WRITE;
2987#endif
2988 int rc = e1kCsRxEnter(pState, VINF_IOM_R3_MMIO_WRITE);
2989 if (RT_LIKELY(rc == VINF_SUCCESS))
2990 {
2991 E1kLog(("%s e1kRegWriteRDT\n", INSTANCE(pState)));
2992 rc = e1kRegWriteDefault(pState, offset, index, value);
2993#ifdef E1K_WITH_RXD_CACHE
2994 /*
2995 * We need to fetch descriptors now as RDT may wrap all the way around
2996 * before we attempt to store a received packet. For example,
2997 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
2998 * size being only 8 descriptors! Note that we fetch descriptors
2999 * only when the cache is empty to reduce the number of memory reads
3000 * in case of frequent RDT writes. Don't fetch anything when the
3001 * receiver is disabled either, as RDH, RDT and RDLEN may be in an
3002 * inconsistent state then.
3003 * Note that even though the cache may seem empty, meaning that there are
3004 * no more available descriptors in it, it may still be in use by the RX
3005 * thread, which has not yet written the last descriptor back but has
3006 * temporarily released the RX lock in order to write the packet body
3007 * to the descriptor's buffer. At this point we are still going to do the
3008 * prefetch, but it won't actually fetch anything if there are no unused
3009 * slots in our "empty" cache (nRxDFetched==E1K_RXD_CACHE_SIZE). We must
3010 * not reset the cache here even if it appears empty. It will be reset at
3011 * a later point in e1kRxDGet().
3012 */
3013 if (e1kRxDIsCacheEmpty(pState) && (RCTL & RCTL_EN))
3014 e1kRxDPrefetch(pState);
3015#endif /* E1K_WITH_RXD_CACHE */
3016 e1kCsRxLeave(pState);
3017 if (RT_SUCCESS(rc))
3018 {
3019/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
3020 * without requiring any context switches. We should also check the
3021 * wait condition before bothering to queue the item as we're currently
3022 * queuing thousands of items per second here in a normal transmit
3023 * scenario. Expect performance changes when fixing this! */
3024#ifdef IN_RING3
3025 /* Signal that we have more receive descriptors available. */
3026 e1kWakeupReceive(pState->CTX_SUFF(pDevIns));
3027#else
3028 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pState->CTX_SUFF(pCanRxQueue));
3029 if (pItem)
3030 PDMQueueInsert(pState->CTX_SUFF(pCanRxQueue), pItem);
3031#endif
3032 }
3033 }
3034 return rc;
3035}
3036
3037/**
3038 * Write handler for Receive Delay Timer register.
3039 *
3040 * @param pState The device state structure.
3041 * @param offset Register offset in memory-mapped frame.
3042 * @param index Register index in register array.
3043 * @param value The value to store.
3045 * @thread EMT
3046 */
3047static int e1kRegWriteRDTR(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
3048{
3049 e1kRegWriteDefault(pState, offset, index, value);
3050 if (value & RDTR_FPD)
3051 {
3052 /* Flush requested, cancel both timers and raise interrupt */
3053#ifdef E1K_USE_RX_TIMERS
3054 e1kCancelTimer(pState, pState->CTX_SUFF(pRIDTimer));
3055 e1kCancelTimer(pState, pState->CTX_SUFF(pRADTimer));
3056#endif
3057 E1K_INC_ISTAT_CNT(pState->uStatIntRDTR);
3058 return e1kRaiseInterrupt(pState, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
3059 }
3060
3061 return VINF_SUCCESS;
3062}
3063
3064DECLINLINE(uint32_t) e1kGetTxLen(E1KSTATE* pState)
3065{
3066 /**
3067 * Make sure TDT won't change during computation. EMT may modify TDT at
3068 * any moment.
3069 */
3070 uint32_t tdt = TDT;
3071 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
3072}
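/*
 * Illustrative example for e1kGetTxLen(): with TDLEN = 4096 the ring holds
 * 4096 / sizeof(E1KTXDESC) = 256 descriptors (16 bytes each). If TDH = 250 and
 * TDT = 10 the tail has wrapped, so the length is 256 + 10 - 250 = 16
 * descriptors; with TDH = 10 and TDT = 250 it is simply 250 - 10 = 240.
 */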
3073
3074#ifdef IN_RING3
3075#ifdef E1K_TX_DELAY
3076
3077/**
3078 * Transmit Delay Timer handler.
3079 *
3080 * @remarks We only get here when the timer expires.
3081 *
3082 * @param pDevIns Pointer to device instance structure.
3083 * @param pTimer Pointer to the timer.
3084 * @param pvUser Pointer to the device state structure.
3085 * @thread EMT
3086 */
3087static DECLCALLBACK(void) e1kTxDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3088{
3089 E1KSTATE *pState = (E1KSTATE *)pvUser;
3090 Assert(e1kCsIsOwner(&pState->csTx));
3091
3092 E1K_INC_ISTAT_CNT(pState->uStatTxDelayExp);
3093#ifdef E1K_INT_STATS
3094 uint64_t u64Elapsed = RTTimeNanoTS() - pState->u64ArmedAt;
3095 if (u64Elapsed > pState->uStatMaxTxDelay)
3096 pState->uStatMaxTxDelay = u64Elapsed;
3097#endif /* E1K_INT_STATS */
3098 int rc = e1kXmitPending(pState, false /*fOnWorkerThread*/);
3099 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
3100}
3101#endif /* E1K_TX_DELAY */
3102
3103#ifdef E1K_USE_TX_TIMERS
3104
3105/**
3106 * Transmit Interrupt Delay Timer handler.
3107 *
3108 * @remarks We only get here when the timer expires.
3109 *
3110 * @param pDevIns Pointer to device instance structure.
3111 * @param pTimer Pointer to the timer.
3112 * @param pvUser Pointer to the device state structure.
3113 * @thread EMT
3114 */
3115static DECLCALLBACK(void) e1kTxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3116{
3117 E1KSTATE *pState = (E1KSTATE *)pvUser;
3118
3119 E1K_INC_ISTAT_CNT(pState->uStatTID);
3120 /* Cancel absolute delay timer as we have already got attention */
3121#ifndef E1K_NO_TAD
3122 e1kCancelTimer(pState, pState->CTX_SUFF(pTADTimer));
3123#endif /* E1K_NO_TAD */
3124 e1kRaiseInterrupt(pState, ICR_TXDW);
3125}
3126
3127/**
3128 * Transmit Absolute Delay Timer handler.
3129 *
3130 * @remarks We only get here when the timer expires.
3131 *
3132 * @param pDevIns Pointer to device instance structure.
3133 * @param pTimer Pointer to the timer.
3134 * @param pvUser Pointer to the device state structure.
3135 * @thread EMT
3136 */
3137static DECLCALLBACK(void) e1kTxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3138{
3139 E1KSTATE *pState = (E1KSTATE *)pvUser;
3140
3141 E1K_INC_ISTAT_CNT(pState->uStatTAD);
3142 /* Cancel interrupt delay timer as we have already got attention */
3143 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
3144 e1kRaiseInterrupt(pState, ICR_TXDW);
3145}
3146
3147#endif /* E1K_USE_TX_TIMERS */
3148#ifdef E1K_USE_RX_TIMERS
3149
3150/**
3151 * Receive Interrupt Delay Timer handler.
3152 *
3153 * @remarks We only get here when the timer expires.
3154 *
3155 * @param pDevIns Pointer to device instance structure.
3156 * @param pTimer Pointer to the timer.
3157 * @param pvUser Pointer to the device state structure.
3158 * @thread EMT
3159 */
3160static DECLCALLBACK(void) e1kRxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3161{
3162 E1KSTATE *pState = (E1KSTATE *)pvUser;
3163
3164 E1K_INC_ISTAT_CNT(pState->uStatRID);
3165 /* Cancel absolute delay timer as we have already got attention */
3166 e1kCancelTimer(pState, pState->CTX_SUFF(pRADTimer));
3167 e1kRaiseInterrupt(pState, ICR_RXT0);
3168}
3169
3170/**
3171 * Receive Absolute Delay Timer handler.
3172 *
3173 * @remarks We only get here when the timer expires.
3174 *
3175 * @param pDevIns Pointer to device instance structure.
3176 * @param pTimer Pointer to the timer.
3177 * @param pvUser Pointer to the device state structure.
3178 * @thread EMT
3179 */
3180static DECLCALLBACK(void) e1kRxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3181{
3182 E1KSTATE *pState = (E1KSTATE *)pvUser;
3183
3184 E1K_INC_ISTAT_CNT(pState->uStatRAD);
3185 /* Cancel interrupt delay timer as we have already got attention */
3186 e1kCancelTimer(pState, pState->CTX_SUFF(pRIDTimer));
3187 e1kRaiseInterrupt(pState, ICR_RXT0);
3188}
3189
3190#endif /* E1K_USE_RX_TIMERS */
3191
3192/**
3193 * Late Interrupt Timer handler.
3194 *
3195 * @param pDevIns Pointer to device instance structure.
3196 * @param pTimer Pointer to the timer.
3197 * @param pvUser Pointer to the device state structure.
3198 * @thread EMT
3199 */
3200static DECLCALLBACK(void) e1kLateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3201{
3202 E1KSTATE *pState = (E1KSTATE *)pvUser;
3203
3204 STAM_PROFILE_ADV_START(&pState->StatLateIntTimer, a);
3205 STAM_COUNTER_INC(&pState->StatLateInts);
3206 E1K_INC_ISTAT_CNT(pState->uStatIntLate);
3207#if 0
3208 if (pState->iStatIntLost > -100)
3209 pState->iStatIntLost--;
3210#endif
3211 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, 0);
3212 STAM_PROFILE_ADV_STOP(&pState->StatLateIntTimer, a);
3213}
3214
3215/**
3216 * Link Up Timer handler.
3217 *
3218 * @param pDevIns Pointer to device instance structure.
3219 * @param pTimer Pointer to the timer.
3220 * @param pvUser Pointer to the device state structure.
3221 * @thread EMT
3222 */
3223static DECLCALLBACK(void) e1kLinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3224{
3225 E1KSTATE *pState = (E1KSTATE *)pvUser;
3226
3227 /*
3228 * This can happen if the link status was set to down while the link-up timer was
3229 * still armed (shortly after e1kLoadDone(), or when the cable was disconnected
3230 * and then reconnected very quickly).
3231 */
3232 if (!pState->fCableConnected)
3233 return;
3234
3235 E1kLog(("%s e1kLinkUpTimer: Link is up\n", INSTANCE(pState)));
3236 STATUS |= STATUS_LU;
3237 Phy::setLinkStatus(&pState->phy, true);
3238 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
3239}
3240
3241#endif /* IN_RING3 */
3242
3243/**
3244 * Sets up the GSO context according to the TSE new context descriptor.
3245 *
3246 * @param pGso The GSO context to setup.
3247 * @param pCtx The context descriptor.
3248 */
3249DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3250{
3251 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3252
3253 /*
3254 * See if the context descriptor describes something that could be TCP or
3255 * UDP over IPv[46].
3256 */
3257 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3258 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3259 {
3260 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3261 return;
3262 }
3263 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3264 {
3265 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3266 return;
3267 }
3268 if (RT_UNLIKELY( pCtx->dw2.fTCP
3269 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3270 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3271 {
3272 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3273 return;
3274 }
3275
3276 /* The TCP/UDP checksum coverage should end at the end of the packet, or at least after the headers. */
3277 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3278 {
3279 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3280 return;
3281 }
3282
3283 /* IPv4 checksum offset. */
3284 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3285 {
3286 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3287 return;
3288 }
3289
3290 /* TCP/UDP checksum offsets. */
3291 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3292 != ( pCtx->dw2.fTCP
3293 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3294 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3295 {
3296 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->tu.u8CSO, pCtx->tu.u8CSS, pCtx->dw2.fTCP));
3297 return;
3298 }
3299
3300 /*
3301 * Because internal networking uses a 16-bit size field for the GSO context
3302 * plus frame, we have to make sure we don't exceed it.
3303 */
3304 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3305 {
3306 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3307 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3308 return;
3309 }
3310
3311 /*
3312 * We're good for now; we'll do more checks when we see the data.
3313 * So, figure out the type of offloading and set up the context.
3314 */
3315 if (pCtx->dw2.fIP)
3316 {
3317 if (pCtx->dw2.fTCP)
3318 {
3319 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3320 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3321 }
3322 else
3323 {
3324 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3325 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3326 }
3327 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3328 * this yet it seems)... */
3329 }
3330 else
3331 {
3332 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /* @todo IPv6 UFO */
3333 if (pCtx->dw2.fTCP)
3334 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3335 else
3336 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3337 }
3338 pGso->offHdr1 = pCtx->ip.u8CSS;
3339 pGso->offHdr2 = pCtx->tu.u8CSS;
3340 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3341 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
3342 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3343 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3344 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3345}
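/*
 * Illustrative example (made-up values, not from a real capture): a typical TSE
 * context for TCP over IPv4 on plain Ethernet would have IPCSS=14, IPCSO=24,
 * TUCSS=34, TUCSO=50, HDRLEN=54 and MSS=1460, which e1kSetupGsoCtx() turns into
 * offHdr1=14, offHdr2=34, cbHdrsSeg=cbHdrsTotal=54 and cbMaxSeg=1460.
 */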
3346
3347/**
3348 * Checks if we can use GSO processing for the current TSE frame.
3349 *
3350 * @param pState The device state structure.
3351 * @param pGso The GSO context.
3352 * @param pData The first data descriptor of the frame.
3353 * @param pCtx The TSO context descriptor.
3354 */
3355DECLINLINE(bool) e1kCanDoGso(E1KSTATE *pState, PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3356{
3357 if (!pData->cmd.fTSE)
3358 {
3359 E1kLog2(("e1kCanDoGso: !TSE\n"));
3360 return false;
3361 }
3362 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3363 {
3364 E1kLog(("e1kCanDoGso: VLE\n"));
3365 return false;
3366 }
3367 if (RT_UNLIKELY(!pState->fGSOEnabled))
3368 {
3369 E1kLog3(("e1kCanDoGso: GSO disabled via CFGM\n"));
3370 return false;
3371 }
3372
3373 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3374 {
3375 case PDMNETWORKGSOTYPE_IPV4_TCP:
3376 case PDMNETWORKGSOTYPE_IPV4_UDP:
3377 if (!pData->dw3.fIXSM)
3378 {
3379 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3380 return false;
3381 }
3382 if (!pData->dw3.fTXSM)
3383 {
3384 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3385 return false;
3386 }
3387 /** @todo what further checks should we perform here? Ethernet frame type? */
3388 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3389 return true;
3390
3391 case PDMNETWORKGSOTYPE_IPV6_TCP:
3392 case PDMNETWORKGSOTYPE_IPV6_UDP:
3393 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3394 {
3395 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3396 return false;
3397 }
3398 if (!pData->dw3.fTXSM)
3399 {
3400 E1kLog(("e1kCanDoGso: !TXSM (IPv6)\n"));
3401 return false;
3402 }
3403 /** @todo what further checks should we perform here? Ethernet frame type? */
3404 E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3405 return true;
3406
3407 default:
3408 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3409 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3410 return false;
3411 }
3412}
3413
3414/**
3415 * Frees the current xmit buffer.
3416 *
3417 * @param pState The device state structure.
3418 */
3419static void e1kXmitFreeBuf(E1KSTATE *pState)
3420{
3421 PPDMSCATTERGATHER pSg = pState->CTX_SUFF(pTxSg);
3422 if (pSg)
3423 {
3424 pState->CTX_SUFF(pTxSg) = NULL;
3425
3426 if (pSg->pvAllocator != pState)
3427 {
3428 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3429 if (pDrv)
3430 pDrv->pfnFreeBuf(pDrv, pSg);
3431 }
3432 else
3433 {
3434 /* loopback */
3435 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3436 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3437 pSg->fFlags = 0;
3438 pSg->pvAllocator = NULL;
3439 }
3440 }
3441}
3442
3443#ifndef E1K_WITH_TXD_CACHE
3444/**
3445 * Allocates an xmit buffer.
3446 *
3447 * @returns See PDMINETWORKUP::pfnAllocBuf.
3448 * @param pState The device state structure.
3449 * @param cbMin The minimum frame size.
3450 * @param fExactSize Whether cbMin is exact or if we have to max it
3451 * out to the max MTU size.
3452 * @param fGso Whether this is a GSO frame or not.
3453 */
3454DECLINLINE(int) e1kXmitAllocBuf(E1KSTATE *pState, size_t cbMin, bool fExactSize, bool fGso)
3455{
3456 /* Adjust cbMin if necessary. */
3457 if (!fExactSize)
3458 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3459
3460 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3461 if (RT_UNLIKELY(pState->CTX_SUFF(pTxSg)))
3462 e1kXmitFreeBuf(pState);
3463 Assert(pState->CTX_SUFF(pTxSg) == NULL);
3464
3465 /*
3466 * Allocate the buffer.
3467 */
3468 PPDMSCATTERGATHER pSg;
3469 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3470 {
3471 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3472 if (RT_UNLIKELY(!pDrv))
3473 return VERR_NET_DOWN;
3474 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pState->GsoCtx : NULL, &pSg);
3475 if (RT_FAILURE(rc))
3476 {
3477 /* Suspend TX as we are out of buffers atm */
3478 STATUS |= STATUS_TXOFF;
3479 return rc;
3480 }
3481 }
3482 else
3483 {
3484 /* Create a loopback using the fallback buffer and preallocated SG. */
3485 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3486 pSg = &pState->uTxFallback.Sg;
3487 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3488 pSg->cbUsed = 0;
3489 pSg->cbAvailable = 0;
3490 pSg->pvAllocator = pState;
3491 pSg->pvUser = NULL; /* No GSO here. */
3492 pSg->cSegs = 1;
3493 pSg->aSegs[0].pvSeg = pState->aTxPacketFallback;
3494 pSg->aSegs[0].cbSeg = sizeof(pState->aTxPacketFallback);
3495 }
3496
3497 pState->CTX_SUFF(pTxSg) = pSg;
3498 return VINF_SUCCESS;
3499}
3500#else /* E1K_WITH_TXD_CACHE */
3501/**
3502 * Allocates an xmit buffer.
3503 *
3504 * @returns See PDMINETWORKUP::pfnAllocBuf.
3505 * @param pState The device state structure.
3509 * @param fGso Whether this is a GSO frame or not.
3510 */
3511DECLINLINE(int) e1kXmitAllocBuf(E1KSTATE *pState, bool fGso)
3512{
3513 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3514 if (RT_UNLIKELY(pState->CTX_SUFF(pTxSg)))
3515 e1kXmitFreeBuf(pState);
3516 Assert(pState->CTX_SUFF(pTxSg) == NULL);
3517
3518 /*
3519 * Allocate the buffer.
3520 */
3521 PPDMSCATTERGATHER pSg;
3522 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3523 {
3524 if (pState->cbTxAlloc == 0)
3525 {
3526 /* Zero packet, no need for the buffer */
3527 return VINF_SUCCESS;
3528 }
3529
3530 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3531 if (RT_UNLIKELY(!pDrv))
3532 return VERR_NET_DOWN;
3533 int rc = pDrv->pfnAllocBuf(pDrv, pState->cbTxAlloc, fGso ? &pState->GsoCtx : NULL, &pSg);
3534 if (RT_FAILURE(rc))
3535 {
3536 /* Suspend TX as we are out of buffers atm */
3537 STATUS |= STATUS_TXOFF;
3538 return rc;
3539 }
3540 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3541 INSTANCE(pState), pState->cbTxAlloc,
3542 pState->fVTag ? "VLAN " : "",
3543 pState->fGSO ? "GSO " : ""));
3544 pState->cbTxAlloc = 0;
3545 }
3546 else
3547 {
3548 /* Create a loopback using the fallback buffer and preallocated SG. */
3549 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3550 pSg = &pState->uTxFallback.Sg;
3551 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3552 pSg->cbUsed = 0;
3553 pSg->cbAvailable = 0;
3554 pSg->pvAllocator = pState;
3555 pSg->pvUser = NULL; /* No GSO here. */
3556 pSg->cSegs = 1;
3557 pSg->aSegs[0].pvSeg = pState->aTxPacketFallback;
3558 pSg->aSegs[0].cbSeg = sizeof(pState->aTxPacketFallback);
3559 }
3560
3561 pState->CTX_SUFF(pTxSg) = pSg;
3562 return VINF_SUCCESS;
3563}
3564#endif /* E1K_WITH_TXD_CACHE */
3565
3566/**
3567 * Checks if it's a GSO buffer or not.
3568 *
3569 * @returns true / false.
3570 * @param pTxSg The scatter / gather buffer.
3571 */
3572DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3573{
3574#if 0
3575 if (!pTxSg)
3576 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3577 if (pTxSg && !pTxSg->pvUser)
3578 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3579#endif
3580 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3581}
3582
3583#ifndef E1K_WITH_TXD_CACHE
3584/**
3585 * Load transmit descriptor from guest memory.
3586 *
3587 * @param pState The device state structure.
3588 * @param pDesc Pointer to descriptor union.
3589 * @param addr Physical address in guest context.
3590 * @thread E1000_TX
3591 */
3592DECLINLINE(void) e1kLoadDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr)
3593{
3594 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3595}
3596#else /* E1K_WITH_TXD_CACHE */
3597/**
3598 * Load transmit descriptors from guest memory.
3599 *
3600 * We may need two physical reads in case the tail has wrapped around the end of
3601 * the TX descriptor ring.
3602 *
3603 * @returns the actual number of descriptors fetched.
3604 * @param pState The device state structure.
3607 * @thread E1000_TX
3608 */
3609DECLINLINE(unsigned) e1kTxDLoadMore(E1KSTATE* pState)
3610{
3611 Assert(pState->iTxDCurrent == 0);
3612 /* We've already loaded pState->nTxDFetched descriptors past TDH. */
3613 unsigned nDescsAvailable = e1kGetTxLen(pState) - pState->nTxDFetched;
3614 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pState->nTxDFetched);
3615 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3616 unsigned nFirstNotLoaded = (TDH + pState->nTxDFetched) % nDescsTotal;
3617 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
3618 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u "
3619 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3620 INSTANCE(pState), nDescsAvailable, nDescsToFetch, nDescsTotal,
3621 nFirstNotLoaded, nDescsInSingleRead));
3622 if (nDescsToFetch == 0)
3623 return 0;
3624 E1KTXDESC* pFirstEmptyDesc = &pState->aTxDescriptors[pState->nTxDFetched];
3625 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
3626 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3627 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3628 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3629 INSTANCE(pState), nDescsInSingleRead,
3630 TDBAH, TDBAL + TDH * sizeof(E1KTXDESC),
3631 nFirstNotLoaded, TDLEN, TDH, TDT));
3632 if (nDescsToFetch > nDescsInSingleRead)
3633 {
3634 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
3635 ((uint64_t)TDBAH << 32) + TDBAL,
3636 pFirstEmptyDesc + nDescsInSingleRead,
3637 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3638 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3639 INSTANCE(pState), nDescsToFetch - nDescsInSingleRead,
3640 TDBAH, TDBAL));
3641 }
3642 pState->nTxDFetched += nDescsToFetch;
3643 return nDescsToFetch;
3644}
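/*
 * Illustrative wrap-around example for e1kTxDLoadMore(), assuming
 * E1K_TXD_CACHE_SIZE is at least 4: with a tiny ring of 8 descriptors, TDH = 6,
 * TDT = 2 and an empty cache, e1kGetTxLen() yields 8 + 2 - 6 = 4 descriptors.
 * The first read fetches the 2 descriptors at indices 6..7 and the second read
 * fetches the remaining 2 from the start of the ring (indices 0..1).
 */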
3645
3646/**
3647 * Load transmit descriptors from guest memory only if there are no loaded
3648 * descriptors.
3649 *
3650 * @returns true if there are descriptors in cache.
3651 * @param pState The device state structure.
3654 * @thread E1000_TX
3655 */
3656DECLINLINE(bool) e1kTxDLazyLoad(E1KSTATE* pState)
3657{
3658 if (pState->nTxDFetched == 0)
3659 return e1kTxDLoadMore(pState) != 0;
3660 return true;
3661}
3662#endif /* E1K_WITH_TXD_CACHE */
3663
3664/**
3665 * Write back transmit descriptor to guest memory.
3666 *
3667 * @param pState The device state structure.
3668 * @param pDesc Pointer to descriptor union.
3669 * @param addr Physical address in guest context.
3670 * @thread E1000_TX
3671 */
3672DECLINLINE(void) e1kWriteBackDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr)
3673{
3674 /* Strictly speaking only the second (write-back) half of the descriptor changes, but we write back the whole descriptor for simplicity. */
3675 e1kPrintTDesc(pState, pDesc, "^^^");
3676 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3677}
3678
3679/**
3680 * Transmit complete frame.
3681 *
3682 * @remarks We skip the FCS since we're not responsible for sending anything to
3683 * a real ethernet wire.
3684 *
3685 * @param pState The device state structure.
3686 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3687 * @thread E1000_TX
3688 */
3689static void e1kTransmitFrame(E1KSTATE* pState, bool fOnWorkerThread)
3690{
3691 PPDMSCATTERGATHER pSg = pState->CTX_SUFF(pTxSg);
3692 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
3693 Assert(!pSg || pSg->cSegs == 1);
3694
3695 if (cbFrame > 70) /* unqualified guess */
3696 pState->led.Asserted.s.fWriting = pState->led.Actual.s.fWriting = 1;
3697
3698#ifdef E1K_INT_STATS
3699 if (cbFrame <= 1514)
3700 E1K_INC_ISTAT_CNT(pState->uStatTx1514);
3701 else if (cbFrame <= 2962)
3702 E1K_INC_ISTAT_CNT(pState->uStatTx2962);
3703 else if (cbFrame <= 4410)
3704 E1K_INC_ISTAT_CNT(pState->uStatTx4410);
3705 else if (cbFrame <= 5858)
3706 E1K_INC_ISTAT_CNT(pState->uStatTx5858);
3707 else if (cbFrame <= 7306)
3708 E1K_INC_ISTAT_CNT(pState->uStatTx7306);
3709 else if (cbFrame <= 8754)
3710 E1K_INC_ISTAT_CNT(pState->uStatTx8754);
3711 else if (cbFrame <= 16384)
3712 E1K_INC_ISTAT_CNT(pState->uStatTx16384);
3713 else if (cbFrame <= 32768)
3714 E1K_INC_ISTAT_CNT(pState->uStatTx32768);
3715 else
3716 E1K_INC_ISTAT_CNT(pState->uStatTxLarge);
3717#endif /* E1K_INT_STATS */
3718
3719 /* Add VLAN tag */
3720 if (cbFrame > 12 && pState->fVTag)
3721 {
3722 E1kLog3(("%s Inserting VLAN tag %08x\n",
3723 INSTANCE(pState), RT_BE2H_U16(VET) | (RT_BE2H_U16(pState->u16VTagTCI) << 16)));
3724 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
3725 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pState->u16VTagTCI) << 16);
3726 pSg->cbUsed += 4;
3727 cbFrame += 4;
3728 Assert(pSg->cbUsed == cbFrame);
3729 Assert(pSg->cbUsed <= pSg->cbAvailable);
3730 }
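    /*
     * Illustrative note: the 4-byte 802.1Q tag is inserted right after the two
     * 6-byte MAC addresses, i.e. at byte offset 12. The memmove above shifts the
     * EtherType and payload 4 bytes towards the end of the buffer, and the
     * 32-bit store then places the VLAN EtherType taken from VET (typically
     * programmed to 0x8100 by the guest) followed by the TCI from the descriptor.
     */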
3731/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
3732 "%.*Rhxd\n"
3733 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
3734 INSTANCE(pState), cbFrame, pSg->aSegs[0].pvSeg, INSTANCE(pState)));*/
3735
3736 /* Update the stats */
3737 E1K_INC_CNT32(TPT);
3738 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
3739 E1K_INC_CNT32(GPTC);
3740 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
3741 E1K_INC_CNT32(BPTC);
3742 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
3743 E1K_INC_CNT32(MPTC);
3744 /* Update octet transmit counter */
3745 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
3746 if (pState->CTX_SUFF(pDrv))
3747 STAM_REL_COUNTER_ADD(&pState->StatTransmitBytes, cbFrame);
3748 if (cbFrame == 64)
3749 E1K_INC_CNT32(PTC64);
3750 else if (cbFrame < 128)
3751 E1K_INC_CNT32(PTC127);
3752 else if (cbFrame < 256)
3753 E1K_INC_CNT32(PTC255);
3754 else if (cbFrame < 512)
3755 E1K_INC_CNT32(PTC511);
3756 else if (cbFrame < 1024)
3757 E1K_INC_CNT32(PTC1023);
3758 else
3759 E1K_INC_CNT32(PTC1522);
3760
3761 E1K_INC_ISTAT_CNT(pState->uStatTxFrm);
3762
3763 /*
3764 * Dump and send the packet.
3765 */
3766 int rc = VERR_NET_DOWN;
3767 if (pSg && pSg->pvAllocator != pState)
3768 {
3769 e1kPacketDump(pState, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
3770
3771 pState->CTX_SUFF(pTxSg) = NULL;
3772 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3773 if (pDrv)
3774 {
3775 /* Release critical section to avoid deadlock in CanReceive */
3776 //e1kCsLeave(pState);
3777 STAM_PROFILE_START(&pState->CTX_SUFF_Z(StatTransmitSend), a);
3778 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
3779 STAM_PROFILE_STOP(&pState->CTX_SUFF_Z(StatTransmitSend), a);
3780 //e1kCsEnter(pState, RT_SRC_POS);
3781 }
3782 }
3783 else if (pSg)
3784 {
3785 Assert(pSg->aSegs[0].pvSeg == pState->aTxPacketFallback);
3786 e1kPacketDump(pState, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
3787
3788 /** @todo do we actually need to check that we're in loopback mode here? */
3789 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
3790 {
3791 E1KRXDST status;
3792 RT_ZERO(status);
3793 status.fPIF = true;
3794 e1kHandleRxPacket(pState, pSg->aSegs[0].pvSeg, cbFrame, status);
3795 rc = VINF_SUCCESS;
3796 }
3797 e1kXmitFreeBuf(pState);
3798 }
3799 else
3800 rc = VERR_NET_DOWN;
3801 if (RT_FAILURE(rc))
3802 {
3803 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
3804 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
3805 }
3806
3807 pState->led.Actual.s.fWriting = 0;
3808}
3809
3810/**
3811 * Compute and write internet checksum (e1kCSum16) at the specified offset.
3812 *
3813 * @param pState The device state structure.
3814 * @param pPkt Pointer to the packet.
3815 * @param u16PktLen Total length of the packet.
3816 * @param cso Offset in packet to write checksum at.
3817 * @param css Offset in packet to start computing
3818 * checksum from.
3819 * @param cse Offset in packet to stop computing
3820 * checksum at.
3821 * @thread E1000_TX
3822 */
3823static void e1kInsertChecksum(E1KSTATE* pState, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
3824{
3825 if (css >= u16PktLen)
3826 {
3827 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
3828 INSTANCE(pState), css, u16PktLen));
3829 return;
3830 }
3831
3832 if (cso >= u16PktLen - 1)
3833 {
3834 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
3835 INSTANCE(pState), cso, u16PktLen));
3836 return;
3837 }
3838
3839 if (cse == 0)
3840 cse = u16PktLen - 1;
3841 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
3842 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", INSTANCE(pState),
3843 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
3844 *(uint16_t*)(pPkt + cso) = u16ChkSum;
3845}
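/*
 * Illustrative usage (made-up offsets): for a plain TCP/IPv4 frame the guest
 * would typically request the TCP checksum with css=34 (start of the TCP
 * header), cso=50 (offset of the TCP checksum field) and cse=0, which the code
 * above treats as "checksum to the end of the packet".
 */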
3846
3847/**
3848 * Add a part of descriptor's buffer to transmit frame.
3849 *
3850 * @remarks data.u64BufAddr is used unconditionally for both data
3851 * and legacy descriptors since it is identical to
3852 * legacy.u64BufAddr.
3853 *
3854 * @param pState The device state structure.
3855 * @param PhysAddr Physical address of the descriptor's buffer part to add.
3856 * @param u16Len Length of buffer to the end of segment.
3857 * @param fSend Force packet sending.
3858 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3859 * @thread E1000_TX
3860 */
3861#ifndef E1K_WITH_TXD_CACHE
3862static void e1kFallbackAddSegment(E1KSTATE* pState, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
3863{
3864 /* TCP header being transmitted */
3865 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
3866 (pState->aTxPacketFallback + pState->contextTSE.tu.u8CSS);
3867 /* IP header being transmitted */
3868 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
3869 (pState->aTxPacketFallback + pState->contextTSE.ip.u8CSS);
3870
3871 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
3872 INSTANCE(pState), u16Len, pState->u32PayRemain, pState->u16HdrRemain, fSend));
3873 Assert(pState->u32PayRemain + pState->u16HdrRemain > 0);
3874
3875 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), PhysAddr,
3876 pState->aTxPacketFallback + pState->u16TxPktLen, u16Len);
3877 E1kLog3(("%s Dump of the segment:\n"
3878 "%.*Rhxd\n"
3879 "%s --- End of dump ---\n",
3880 INSTANCE(pState), u16Len, pState->aTxPacketFallback + pState->u16TxPktLen, INSTANCE(pState)));
3881 pState->u16TxPktLen += u16Len;
3882 E1kLog3(("%s e1kFallbackAddSegment: pState->u16TxPktLen=%x\n",
3883 INSTANCE(pState), pState->u16TxPktLen));
3884 if (pState->u16HdrRemain > 0)
3885 {
3886 /* The header was not complete, check if it is now */
3887 if (u16Len >= pState->u16HdrRemain)
3888 {
3889 /* The rest is payload */
3890 u16Len -= pState->u16HdrRemain;
3891 pState->u16HdrRemain = 0;
3892 /* Save partial checksum and flags */
3893 pState->u32SavedCsum = pTcpHdr->chksum;
3894 pState->u16SavedFlags = pTcpHdr->hdrlen_flags;
3895 /* Clear FIN and PSH flags now and set them only in the last segment */
3896 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
3897 }
3898 else
3899 {
3900 /* Still not */
3901 pState->u16HdrRemain -= u16Len;
3902 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
3903 INSTANCE(pState), pState->u16HdrRemain));
3904 return;
3905 }
3906 }
3907
3908 pState->u32PayRemain -= u16Len;
3909
3910 if (fSend)
3911 {
3912 /* Leave ethernet header intact */
3913 /* IP Total Length = payload + headers - ethernet header */
3914 pIpHdr->total_len = htons(pState->u16TxPktLen - pState->contextTSE.ip.u8CSS);
3915 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
3916 INSTANCE(pState), ntohs(pIpHdr->total_len)));
3917 /* Update IP Checksum */
3918 pIpHdr->chksum = 0;
3919 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
3920 pState->contextTSE.ip.u8CSO,
3921 pState->contextTSE.ip.u8CSS,
3922 pState->contextTSE.ip.u16CSE);
3923
3924 /* Update TCP flags */
3925 /* Restore original FIN and PSH flags for the last segment */
3926 if (pState->u32PayRemain == 0)
3927 {
3928 pTcpHdr->hdrlen_flags = pState->u16SavedFlags;
3929 E1K_INC_CNT32(TSCTC);
3930 }
3931 /* Add TCP length to partial pseudo header sum */
3932 uint32_t csum = pState->u32SavedCsum
3933 + htons(pState->u16TxPktLen - pState->contextTSE.tu.u8CSS);
3934 while (csum >> 16)
3935 csum = (csum >> 16) + (csum & 0xFFFF);
3936 pTcpHdr->chksum = csum;
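        /*
         * Illustrative note on the folding loop above: the sum is kept in
         * one's-complement form, so any carry out of the low 16 bits is added
         * back in. E.g. 0xFFF0 + 0x0123 = 0x10113 folds to 0x0113 + 0x1 = 0x0114.
         */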
3937 /* Compute final checksum */
3938 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
3939 pState->contextTSE.tu.u8CSO,
3940 pState->contextTSE.tu.u8CSS,
3941 pState->contextTSE.tu.u16CSE);
3942
3943 /*
3944 * Transmit it. If we've used the SG already, allocate a new one before
3945 * we copy the data into it.
3946 */
3947 if (!pState->CTX_SUFF(pTxSg))
3948 e1kXmitAllocBuf(pState, pState->u16TxPktLen + (pState->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
3949 if (pState->CTX_SUFF(pTxSg))
3950 {
3951 Assert(pState->u16TxPktLen <= pState->CTX_SUFF(pTxSg)->cbAvailable);
3952 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
3953 if (pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pState->aTxPacketFallback)
3954 memcpy(pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->aTxPacketFallback, pState->u16TxPktLen);
3955 pState->CTX_SUFF(pTxSg)->cbUsed = pState->u16TxPktLen;
3956 pState->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pState->u16TxPktLen;
3957 }
3958 e1kTransmitFrame(pState, fOnWorkerThread);
3959
3960 /* Update Sequence Number */
3961 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pState->u16TxPktLen
3962 - pState->contextTSE.dw3.u8HDRLEN);
3963 /* Increment IP identification */
3964 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
3965 }
3966}
3967#else /* E1K_WITH_TXD_CACHE */
3968static int e1kFallbackAddSegment(E1KSTATE* pState, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
3969{
3970 int rc = VINF_SUCCESS;
3971 /* TCP header being transmitted */
3972 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
3973 (pState->aTxPacketFallback + pState->contextTSE.tu.u8CSS);
3974 /* IP header being transmitted */
3975 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
3976 (pState->aTxPacketFallback + pState->contextTSE.ip.u8CSS);
3977
3978 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
3979 INSTANCE(pState), u16Len, pState->u32PayRemain, pState->u16HdrRemain, fSend));
3980 Assert(pState->u32PayRemain + pState->u16HdrRemain > 0);
3981
3982 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), PhysAddr,
3983 pState->aTxPacketFallback + pState->u16TxPktLen, u16Len);
3984 E1kLog3(("%s Dump of the segment:\n"
3985 "%.*Rhxd\n"
3986 "%s --- End of dump ---\n",
3987 INSTANCE(pState), u16Len, pState->aTxPacketFallback + pState->u16TxPktLen, INSTANCE(pState)));
3988 pState->u16TxPktLen += u16Len;
3989 E1kLog3(("%s e1kFallbackAddSegment: pState->u16TxPktLen=%x\n",
3990 INSTANCE(pState), pState->u16TxPktLen));
3991 if (pState->u16HdrRemain > 0)
3992 {
3993 /* The header was not complete, check if it is now */
3994 if (u16Len >= pState->u16HdrRemain)
3995 {
3996 /* The rest is payload */
3997 u16Len -= pState->u16HdrRemain;
3998 pState->u16HdrRemain = 0;
3999 /* Save partial checksum and flags */
4000 pState->u32SavedCsum = pTcpHdr->chksum;
4001 pState->u16SavedFlags = pTcpHdr->hdrlen_flags;
4002 /* Clear FIN and PSH flags now and set them only in the last segment */
4003 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
4004 }
4005 else
4006 {
4007 /* Still not */
4008 pState->u16HdrRemain -= u16Len;
4009 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
4010 INSTANCE(pState), pState->u16HdrRemain));
4011 return rc;
4012 }
4013 }
4014
4015 pState->u32PayRemain -= u16Len;
4016
4017 if (fSend)
4018 {
4019 /* Leave ethernet header intact */
4020 /* IP Total Length = payload + headers - ethernet header */
4021 pIpHdr->total_len = htons(pState->u16TxPktLen - pState->contextTSE.ip.u8CSS);
4022 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
4023 INSTANCE(pState), ntohs(pIpHdr->total_len)));
4024 /* Update IP Checksum */
4025 pIpHdr->chksum = 0;
4026 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
4027 pState->contextTSE.ip.u8CSO,
4028 pState->contextTSE.ip.u8CSS,
4029 pState->contextTSE.ip.u16CSE);
4030
4031 /* Update TCP flags */
4032 /* Restore original FIN and PSH flags for the last segment */
4033 if (pState->u32PayRemain == 0)
4034 {
4035 pTcpHdr->hdrlen_flags = pState->u16SavedFlags;
4036 E1K_INC_CNT32(TSCTC);
4037 }
4038 /* Add TCP length to partial pseudo header sum */
4039 uint32_t csum = pState->u32SavedCsum
4040 + htons(pState->u16TxPktLen - pState->contextTSE.tu.u8CSS);
4041 while (csum >> 16)
4042 csum = (csum >> 16) + (csum & 0xFFFF);
4043 pTcpHdr->chksum = csum;
4044 /* Compute final checksum */
4045 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
4046 pState->contextTSE.tu.u8CSO,
4047 pState->contextTSE.tu.u8CSS,
4048 pState->contextTSE.tu.u16CSE);
4049
4050 /*
4051 * Transmit it.
4052 */
4053 if (pState->CTX_SUFF(pTxSg))
4054 {
4055 Assert(pState->u16TxPktLen <= pState->CTX_SUFF(pTxSg)->cbAvailable);
4056 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
4057 if (pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pState->aTxPacketFallback)
4058 memcpy(pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->aTxPacketFallback, pState->u16TxPktLen);
4059 pState->CTX_SUFF(pTxSg)->cbUsed = pState->u16TxPktLen;
4060 pState->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pState->u16TxPktLen;
4061 }
4062 e1kTransmitFrame(pState, fOnWorkerThread);
4063
4064 /* Update Sequence Number */
4065 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pState->u16TxPktLen
4066 - pState->contextTSE.dw3.u8HDRLEN);
4067 /* Increment IP identification */
4068 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
4069
4070 /* Allocate new buffer for the next segment. */
4071 if (pState->u32PayRemain)
4072 {
4073 pState->cbTxAlloc = RT_MIN(pState->u32PayRemain,
4074 pState->contextTSE.dw3.u16MSS)
4075 + pState->contextTSE.dw3.u8HDRLEN
4076 + (pState->fVTag ? 4 : 0);
4077 rc = e1kXmitAllocBuf(pState, false /* fGSO */);
4078 }
4079 }
4080
4081 return rc;
4082}
4083#endif /* E1K_WITH_TXD_CACHE */
4084
4085#ifndef E1K_WITH_TXD_CACHE
4086/**
4087 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4088 * frame.
4089 *
4090 * We construct the frame in the fallback buffer first and then copy it to the SG
4091 * buffer before passing it down to the network driver code.
4092 *
4093 * @returns true if the frame should be transmitted, false if not.
4094 *
4095 * @param pState The device state structure.
4096 * @param pDesc Pointer to the descriptor to transmit.
4097 * @param cbFragment Length of descriptor's buffer.
4098 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4099 * @thread E1000_TX
4100 */
4101static bool e1kFallbackAddToFrame(E1KSTATE* pState, E1KTXDESC* pDesc, uint32_t cbFragment, bool fOnWorkerThread)
4102{
4103 PPDMSCATTERGATHER pTxSg = pState->CTX_SUFF(pTxSg);
4104 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4105 Assert(pDesc->data.cmd.fTSE);
4106 Assert(!e1kXmitIsGsoBuf(pTxSg));
4107
4108 uint16_t u16MaxPktLen = pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw3.u16MSS;
4109 Assert(u16MaxPktLen != 0);
4110 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4111
4112 /*
4113 * Carve out segments.
4114 */
4115 do
4116 {
4117 /* Calculate how many bytes we have left in this TCP segment */
4118 uint32_t cb = u16MaxPktLen - pState->u16TxPktLen;
4119 if (cb > cbFragment)
4120 {
4121 /* This descriptor fits completely into current segment */
4122 cb = cbFragment;
4123 e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4124 }
4125 else
4126 {
4127 e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4128 /*
4129 * Rewind the packet tail pointer to the beginning of payload,
4130 * so we continue writing right beyond the header.
4131 */
4132 pState->u16TxPktLen = pState->contextTSE.dw3.u8HDRLEN;
4133 }
4134
4135 pDesc->data.u64BufAddr += cb;
4136 cbFragment -= cb;
4137 } while (cbFragment > 0);
4138
4139 if (pDesc->data.cmd.fEOP)
4140 {
4141 /* End of packet, next segment will contain header. */
4142 if (pState->u32PayRemain != 0)
4143 E1K_INC_CNT32(TSCTFC);
4144 pState->u16TxPktLen = 0;
4145 e1kXmitFreeBuf(pState);
4146 }
4147
4148 return false;
4149}
4150#else /* E1K_WITH_TXD_CACHE */
4151/**
4152 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4153 * frame.
4154 *
4155 * We construct the frame in the fallback buffer first and then copy it to the SG
4156 * buffer before passing it down to the network driver code.
4157 *
4158 * @returns VBox status code.
4159 *
4160 * @param pState The device state structure.
4161 * @param pDesc Pointer to the descriptor to transmit.
4163 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4164 * @thread E1000_TX
4165 */
4166static int e1kFallbackAddToFrame(E1KSTATE* pState, E1KTXDESC* pDesc, bool fOnWorkerThread)
4167{
4168 int rc = VINF_SUCCESS;
4169 PPDMSCATTERGATHER pTxSg = pState->CTX_SUFF(pTxSg);
4170 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4171 Assert(pDesc->data.cmd.fTSE);
4172 Assert(!e1kXmitIsGsoBuf(pTxSg));
4173
4174 uint16_t u16MaxPktLen = pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw3.u16MSS;
4175 Assert(u16MaxPktLen != 0);
4176 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
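    /*
     * Illustrative example (made-up sizes): with HDRLEN = 54 and MSS = 1460 each
     * carved-out segment is at most u16MaxPktLen = 1514 bytes, so a TSE frame
     * with a 4000-byte TCP payload goes out as three packets carrying 1460, 1460
     * and 1080 payload bytes, each prefixed with a fresh copy of the 54-byte
     * headers.
     */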
4177
4178 /*
4179 * Carve out segments.
4180 */
4181 do
4182 {
4183 /* Calculate how many bytes we have left in this TCP segment */
4184 uint32_t cb = u16MaxPktLen - pState->u16TxPktLen;
4185 if (cb > pDesc->data.cmd.u20DTALEN)
4186 {
4187 /* This descriptor fits completely into current segment */
4188 cb = pDesc->data.cmd.u20DTALEN;
4189 rc = e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4190 }
4191 else
4192 {
4193 rc = e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4194 /*
4195 * Rewind the packet tail pointer to the beginning of payload,
4196 * so we continue writing right beyond the header.
4197 */
4198 pState->u16TxPktLen = pState->contextTSE.dw3.u8HDRLEN;
4199 }
4200
4201 pDesc->data.u64BufAddr += cb;
4202 pDesc->data.cmd.u20DTALEN -= cb;
4203 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4204
4205 if (pDesc->data.cmd.fEOP)
4206 {
4207 /* End of packet, next segment will contain header. */
4208 if (pState->u32PayRemain != 0)
4209 E1K_INC_CNT32(TSCTFC);
4210 pState->u16TxPktLen = 0;
4211 e1kXmitFreeBuf(pState);
4212 }
4213
4214 return rc;
4215}
4216#endif /* E1K_WITH_TXD_CACHE */
4217
4218
4219/**
4220 * Add descriptor's buffer to transmit frame.
4221 *
4222 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4223 * TSE frames we cannot handle as GSO.
4224 *
4225 * @returns true on success, false on failure.
4226 *
4227 * @param pThis The device state structure.
4228 * @param PhysAddr The physical address of the descriptor buffer.
4229 * @param cbFragment Length of descriptor's buffer.
4230 * @thread E1000_TX
4231 */
4232static bool e1kAddToFrame(E1KSTATE *pThis, RTGCPHYS PhysAddr, uint32_t cbFragment)
4233{
4234 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4235 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4236 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4237
4238 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4239 {
4240 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", INSTANCE(pThis), cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4241 return false;
4242 }
4243 if (RT_UNLIKELY( fGso && cbNewPkt > pTxSg->cbAvailable ))
4244 {
4245 E1kLog(("%s Transmit packet is too large: %u > %u(max)/GSO\n", INSTANCE(pThis), cbNewPkt, pTxSg->cbAvailable));
4246 return false;
4247 }
4248
4249 if (RT_LIKELY(pTxSg))
4250 {
4251 Assert(pTxSg->cSegs == 1);
4252 Assert(pTxSg->cbUsed == pThis->u16TxPktLen);
4253
4254 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4255 (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4256
4257 pTxSg->cbUsed = cbNewPkt;
4258 }
4259 pThis->u16TxPktLen = cbNewPkt;
4260
4261 return true;
4262}
4263
4264
4265/**
4266 * Write the descriptor back to guest memory and notify the guest.
4267 *
4268 * @param pState The device state structure.
4269 * @param pDesc Pointer to the descriptor that has been transmitted.
4270 * @param addr Physical address of the descriptor in guest memory.
4271 * @thread E1000_TX
4272 */
4273static void e1kDescReport(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr)
4274{
4275 /*
4276 * We fake descriptor write-back bursting. Descriptors are written back as they are
4277 * processed.
4278 */
4279 /* Let's pretend we process descriptors. Write back with DD set. */
4280 /*
4281 * Prior to r71586 we tried to accommodate the case where write-back bursts
4282 * are enabled, without actually implementing bursting, by writing back all
4283 * descriptors, even the ones that did not have RS set. This caused kernel
4284 * panics with Linux SMP kernels, as the e1000 driver tried to free the skb
4285 * associated with a written-back descriptor if it happened to be a context
4286 * descriptor, and context descriptors have no skb associated with them.
4287 * Starting from r71586 we write back only the descriptors with RS set,
4288 * which differs slightly from what real hardware does when a chain of data
4289 * descriptors has RS set on some of them and not on others; that is a very
4290 * uncommon scenario, though.
4291 * We need to check RPS as well since some legacy drivers use it instead of
4292 * RS even with newer cards.
4293 */
4294 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4295 {
4296 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4297 e1kWriteBackDesc(pState, pDesc, addr);
4298 if (pDesc->legacy.cmd.fEOP)
4299 {
4300#ifdef E1K_USE_TX_TIMERS
4301 if (pDesc->legacy.cmd.fIDE)
4302 {
4303 E1K_INC_ISTAT_CNT(pState->uStatTxIDE);
4304 //if (pState->fIntRaised)
4305 //{
4306 // /* Interrupt is already pending, no need for timers */
4307 // ICR |= ICR_TXDW;
4308 //}
4309 //else {
4310 /* Arm the timer to fire in TIDV usec (discard .024) */
4311 e1kArmTimer(pState, pState->CTX_SUFF(pTIDTimer), TIDV);
4312# ifndef E1K_NO_TAD
4313 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4314 E1kLog2(("%s Checking if TAD timer is running\n",
4315 INSTANCE(pState)));
4316 if (TADV != 0 && !TMTimerIsActive(pState->CTX_SUFF(pTADTimer)))
4317 e1kArmTimer(pState, pState->CTX_SUFF(pTADTimer), TADV);
4318# endif /* E1K_NO_TAD */
4319 }
4320 else
4321 {
4322 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4323 INSTANCE(pState)));
4324# ifndef E1K_NO_TAD
4325 /* Cancel both timers if armed and fire immediately. */
4326 e1kCancelTimer(pState, pState->CTX_SUFF(pTADTimer));
4327# endif /* E1K_NO_TAD */
4328#endif /* E1K_USE_TX_TIMERS */
4329 E1K_INC_ISTAT_CNT(pState->uStatIntTx);
4330 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXDW);
4331#ifdef E1K_USE_TX_TIMERS
4332 }
4333#endif /* E1K_USE_TX_TIMERS */
4334 }
4335 }
4336 else
4337 {
4338 E1K_INC_ISTAT_CNT(pState->uStatTxNoRS);
4339 }
4340}
4341
4342#ifndef E1K_WITH_TXD_CACHE
4343/**
4344 * Process Transmit Descriptor.
4345 *
4346 * E1000 supports three types of transmit descriptors:
4347 * - legacy: data descriptors in the older, context-less format.
4348 * - data: same as legacy but providing new offloading capabilities.
4349 * - context: sets up the context for the following data descriptors.
4350 *
4351 * @param pState The device state structure.
4352 * @param pDesc Pointer to descriptor union.
4353 * @param addr Physical address of descriptor in guest memory.
4354 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4355 * @thread E1000_TX
4356 */
4357static int e1kXmitDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr, bool fOnWorkerThread)
4358{
4359 int rc = VINF_SUCCESS;
4360 uint32_t cbVTag = 0;
4361
4362 e1kPrintTDesc(pState, pDesc, "vvv");
4363
4364#ifdef E1K_USE_TX_TIMERS
4365 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
4366#endif /* E1K_USE_TX_TIMERS */
4367
4368 switch (e1kGetDescType(pDesc))
4369 {
4370 case E1K_DTYP_CONTEXT:
4371 if (pDesc->context.dw2.fTSE)
4372 {
4373 pState->contextTSE = pDesc->context;
4374 pState->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4375 pState->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4376 e1kSetupGsoCtx(&pState->GsoCtx, &pDesc->context);
4377 STAM_COUNTER_INC(&pState->StatTxDescCtxTSE);
4378 }
4379 else
4380 {
4381 pState->contextNormal = pDesc->context;
4382 STAM_COUNTER_INC(&pState->StatTxDescCtxNormal);
4383 }
4384 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4385 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", INSTANCE(pState),
4386 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4387 pDesc->context.ip.u8CSS,
4388 pDesc->context.ip.u8CSO,
4389 pDesc->context.ip.u16CSE,
4390 pDesc->context.tu.u8CSS,
4391 pDesc->context.tu.u8CSO,
4392 pDesc->context.tu.u16CSE));
4393 E1K_INC_ISTAT_CNT(pState->uStatDescCtx);
4394 e1kDescReport(pState, pDesc, addr);
4395 break;
4396
4397 case E1K_DTYP_DATA:
4398 {
4399 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4400 {
4401 E1kLog2(("%s Empty data descriptor, skipped.\n", INSTANCE(pState)));
4402 /** @todo Same as legacy when !TSE. See below. */
4403 break;
4404 }
4405 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4406 &pState->StatTxDescTSEData:
4407 &pState->StatTxDescData);
4408 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4409 E1K_INC_ISTAT_CNT(pState->uStatDescDat);
4410
4411 /*
4412 * The last descriptor of a non-TSE packet must carry the VLE flag.
4413 * TSE packets have the VLE flag in the first descriptor. The latter
4414 * case is taken care of a bit later, when cbVTag gets assigned.
4415 *
4416 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4417 */
4418 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4419 {
4420 pState->fVTag = pDesc->data.cmd.fVLE;
4421 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
4422 }
4423 /*
4424 * First fragment: Allocate new buffer and save the IXSM and TXSM
4425 * packet options as these are only valid in the first fragment.
4426 */
4427 if (pState->u16TxPktLen == 0)
4428 {
4429 pState->fIPcsum = pDesc->data.dw3.fIXSM;
4430 pState->fTCPcsum = pDesc->data.dw3.fTXSM;
4431 E1kLog2(("%s Saving checksum flags:%s%s; \n", INSTANCE(pState),
4432 pState->fIPcsum ? " IP" : "",
4433 pState->fTCPcsum ? " TCP/UDP" : ""));
4434 if (pDesc->data.cmd.fTSE)
4435 {
4436 /* 2) pDesc->data.cmd.fTSE && pState->u16TxPktLen == 0 */
4437 pState->fVTag = pDesc->data.cmd.fVLE;
4438 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
4439 cbVTag = pState->fVTag ? 4 : 0;
4440 }
4441 else if (pDesc->data.cmd.fEOP)
4442 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4443 else
4444 cbVTag = 4;
4445 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", INSTANCE(pState), cbVTag));
4446 if (e1kCanDoGso(pState, &pState->GsoCtx, &pDesc->data, &pState->contextTSE))
4447 rc = e1kXmitAllocBuf(pState, pState->contextTSE.dw2.u20PAYLEN + pState->contextTSE.dw3.u8HDRLEN + cbVTag,
4448 true /*fExactSize*/, true /*fGso*/);
4449 else if (pDesc->data.cmd.fTSE)
4450 rc = e1kXmitAllocBuf(pState, pState->contextTSE.dw3.u16MSS + pState->contextTSE.dw3.u8HDRLEN + cbVTag,
4451 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4452 else
4453 rc = e1kXmitAllocBuf(pState, pDesc->data.cmd.u20DTALEN + cbVTag,
4454 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4455
4456 /**
4457 * @todo Perhaps it is not that simple for GSO packets! We may
4458 * need to unwind some changes.
4459 */
4460 if (RT_FAILURE(rc))
4461 {
4462 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4463 break;
4464 }
4465 /** @todo Is there any way of indicating errors other than collisions? Like
4466 * VERR_NET_DOWN. */
4467 }
4468
4469 /*
4470 * Add the descriptor data to the frame. If the frame is complete,
4471 * transmit it and reset the u16TxPktLen field.
4472 */
4473 if (e1kXmitIsGsoBuf(pState->CTX_SUFF(pTxSg)))
4474 {
4475 STAM_COUNTER_INC(&pState->StatTxPathGSO);
4476 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4477 if (pDesc->data.cmd.fEOP)
4478 {
4479 if ( fRc
4480 && pState->CTX_SUFF(pTxSg)
4481 && pState->CTX_SUFF(pTxSg)->cbUsed == (size_t)pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN)
4482 {
4483 e1kTransmitFrame(pState, fOnWorkerThread);
4484 E1K_INC_CNT32(TSCTC);
4485 }
4486 else
4487 {
4488 if (fRc)
4489 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , INSTANCE(pState),
4490 pState->CTX_SUFF(pTxSg), pState->CTX_SUFF(pTxSg) ? pState->CTX_SUFF(pTxSg)->cbUsed : 0,
4491 pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN));
4492 e1kXmitFreeBuf(pState);
4493 E1K_INC_CNT32(TSCTFC);
4494 }
4495 pState->u16TxPktLen = 0;
4496 }
4497 }
4498 else if (!pDesc->data.cmd.fTSE)
4499 {
4500 STAM_COUNTER_INC(&pState->StatTxPathRegular);
4501 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4502 if (pDesc->data.cmd.fEOP)
4503 {
4504 if (fRc && pState->CTX_SUFF(pTxSg))
4505 {
4506 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
4507 if (pState->fIPcsum)
4508 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4509 pState->contextNormal.ip.u8CSO,
4510 pState->contextNormal.ip.u8CSS,
4511 pState->contextNormal.ip.u16CSE);
4512 if (pState->fTCPcsum)
4513 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4514 pState->contextNormal.tu.u8CSO,
4515 pState->contextNormal.tu.u8CSS,
4516 pState->contextNormal.tu.u16CSE);
4517 e1kTransmitFrame(pState, fOnWorkerThread);
4518 }
4519 else
4520 e1kXmitFreeBuf(pState);
4521 pState->u16TxPktLen = 0;
4522 }
4523 }
4524 else
4525 {
4526 STAM_COUNTER_INC(&pState->StatTxPathFallback);
4527 e1kFallbackAddToFrame(pState, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4528 }
4529
4530 e1kDescReport(pState, pDesc, addr);
4531 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4532 break;
4533 }
4534
4535 case E1K_DTYP_LEGACY:
4536 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4537 {
4538 E1kLog(("%s Empty legacy descriptor, skipped.\n", INSTANCE(pState)));
4539 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4540 break;
4541 }
4542 STAM_COUNTER_INC(&pState->StatTxDescLegacy);
4543 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4544
4545 /* First fragment: allocate new buffer. */
4546 if (pState->u16TxPktLen == 0)
4547 {
4548 if (pDesc->legacy.cmd.fEOP)
4549 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4550 else
4551 cbVTag = 4;
4552 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", INSTANCE(pState), cbVTag));
4553 /** @todo reset status bits? */
4554 rc = e1kXmitAllocBuf(pState, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4555 if (RT_FAILURE(rc))
4556 {
4557 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4558 break;
4559 }
4560
4561                /** @todo Is there any way of indicating errors other than collisions? Like
4562                 *        VERR_NET_DOWN. */
4563 }
4564
4565 /* Add fragment to frame. */
4566 if (e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4567 {
4568 E1K_INC_ISTAT_CNT(pState->uStatDescLeg);
4569
4570 /* Last fragment: Transmit and reset the packet storage counter. */
4571 if (pDesc->legacy.cmd.fEOP)
4572 {
4573 pState->fVTag = pDesc->legacy.cmd.fVLE;
4574 pState->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4575 /** @todo Offload processing goes here. */
4576 e1kTransmitFrame(pState, fOnWorkerThread);
4577 pState->u16TxPktLen = 0;
4578 }
4579 }
4580 /* Last fragment + failure: free the buffer and reset the storage counter. */
4581 else if (pDesc->legacy.cmd.fEOP)
4582 {
4583 e1kXmitFreeBuf(pState);
4584 pState->u16TxPktLen = 0;
4585 }
4586
4587 e1kDescReport(pState, pDesc, addr);
4588 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4589 break;
4590
4591 default:
4592 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4593 INSTANCE(pState), e1kGetDescType(pDesc)));
4594 break;
4595 }
4596
4597 return rc;
4598}
4599#else /* E1K_WITH_TXD_CACHE */
4600/**
4601 * Process Transmit Descriptor.
4602 *
4603 * E1000 supports three types of transmit descriptors:
4604 * - legacy: data descriptors in the older, context-less format.
4605 * - data: same as legacy but with additional offloading capabilities.
4606 * - context: sets up the context for the following data descriptors.
4607 *
4608 * @param pState The device state structure.
4609 * @param pDesc Pointer to descriptor union.
4610 * @param addr Physical address of descriptor in guest memory.
4611 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4613 * @thread E1000_TX
4614 */
4615static int e1kXmitDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr,
4616 bool fOnWorkerThread)
4617{
4618 int rc = VINF_SUCCESS;
4619 uint32_t cbVTag = 0;
4620
4621 e1kPrintTDesc(pState, pDesc, "vvv");
4622
4623#ifdef E1K_USE_TX_TIMERS
4624 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
4625#endif /* E1K_USE_TX_TIMERS */
4626
4627 switch (e1kGetDescType(pDesc))
4628 {
4629 case E1K_DTYP_CONTEXT:
4630            /* The caller has already updated the context. */
4631 E1K_INC_ISTAT_CNT(pState->uStatDescCtx);
4632 e1kDescReport(pState, pDesc, addr);
4633 break;
4634
4635 case E1K_DTYP_DATA:
4636 {
4637 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4638 &pState->StatTxDescTSEData:
4639 &pState->StatTxDescData);
4640 E1K_INC_ISTAT_CNT(pState->uStatDescDat);
4641 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4642 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4643 {
4644                E1kLog2(("%s Empty data descriptor, skipped.\n", INSTANCE(pState)));
4645 }
4646 else
4647 {
4648 /*
4649 * Add the descriptor data to the frame. If the frame is complete,
4650 * transmit it and reset the u16TxPktLen field.
4651 */
4652 if (e1kXmitIsGsoBuf(pState->CTX_SUFF(pTxSg)))
4653 {
4654 STAM_COUNTER_INC(&pState->StatTxPathGSO);
4655 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4656 if (pDesc->data.cmd.fEOP)
4657 {
4658 if ( fRc
4659 && pState->CTX_SUFF(pTxSg)
4660 && pState->CTX_SUFF(pTxSg)->cbUsed == (size_t)pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN)
4661 {
4662 e1kTransmitFrame(pState, fOnWorkerThread);
4663 E1K_INC_CNT32(TSCTC);
4664 }
4665 else
4666 {
4667 if (fRc)
4668 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , INSTANCE(pState),
4669 pState->CTX_SUFF(pTxSg), pState->CTX_SUFF(pTxSg) ? pState->CTX_SUFF(pTxSg)->cbUsed : 0,
4670 pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN));
4671 e1kXmitFreeBuf(pState);
4672 E1K_INC_CNT32(TSCTFC);
4673 }
4674 pState->u16TxPktLen = 0;
4675 }
4676 }
4677 else if (!pDesc->data.cmd.fTSE)
4678 {
4679 STAM_COUNTER_INC(&pState->StatTxPathRegular);
4680 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4681 if (pDesc->data.cmd.fEOP)
4682 {
4683 if (fRc && pState->CTX_SUFF(pTxSg))
4684 {
4685 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
4686 if (pState->fIPcsum)
4687 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4688 pState->contextNormal.ip.u8CSO,
4689 pState->contextNormal.ip.u8CSS,
4690 pState->contextNormal.ip.u16CSE);
4691 if (pState->fTCPcsum)
4692 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4693 pState->contextNormal.tu.u8CSO,
4694 pState->contextNormal.tu.u8CSS,
4695 pState->contextNormal.tu.u16CSE);
4696 e1kTransmitFrame(pState, fOnWorkerThread);
4697 }
4698 else
4699 e1kXmitFreeBuf(pState);
4700 pState->u16TxPktLen = 0;
4701 }
4702 }
4703 else
4704 {
4705 STAM_COUNTER_INC(&pState->StatTxPathFallback);
4706 rc = e1kFallbackAddToFrame(pState, pDesc, fOnWorkerThread);
4707 }
4708 }
4709 e1kDescReport(pState, pDesc, addr);
4710 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4711 break;
4712 }
4713
4714 case E1K_DTYP_LEGACY:
4715 STAM_COUNTER_INC(&pState->StatTxDescLegacy);
4716 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4717 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4718 {
4719 E1kLog(("%s Empty legacy descriptor, skipped.\n", INSTANCE(pState)));
4720 }
4721 else
4722 {
4723 /* Add fragment to frame. */
4724 if (e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4725 {
4726 E1K_INC_ISTAT_CNT(pState->uStatDescLeg);
4727
4728 /* Last fragment: Transmit and reset the packet storage counter. */
4729 if (pDesc->legacy.cmd.fEOP)
4730 {
4731 if (pDesc->legacy.cmd.fIC)
4732 {
4733 e1kInsertChecksum(pState,
4734 (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
4735 pState->u16TxPktLen,
4736 pDesc->legacy.cmd.u8CSO,
4737 pDesc->legacy.dw3.u8CSS,
4738 0);
4739 }
4740 e1kTransmitFrame(pState, fOnWorkerThread);
4741 pState->u16TxPktLen = 0;
4742 }
4743 }
4744 /* Last fragment + failure: free the buffer and reset the storage counter. */
4745 else if (pDesc->legacy.cmd.fEOP)
4746 {
4747 e1kXmitFreeBuf(pState);
4748 pState->u16TxPktLen = 0;
4749 }
4750 }
4751 e1kDescReport(pState, pDesc, addr);
4752 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4753 break;
4754
4755 default:
4756 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4757 INSTANCE(pState), e1kGetDescType(pDesc)));
4758 break;
4759 }
4760
4761 return rc;
4762}
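/*
 * Illustrative sketch only (not part of the device logic): assuming the usual
 * driver behavior, a single TCP segmentation offload (TSE) packet arrives as a
 * sequence of descriptors in the TX ring, e.g.:
 *
 *   TDT write -> [ CONTEXT (TSE, MSS, HDRLEN, PAYLEN)  ]
 *                [ DATA    (TSE, first buffer)          ]
 *                [ DATA    (TSE, ...)                   ]
 *                [ DATA    (TSE, last buffer, EOP, RS)  ]
 *
 * e1kXmitDesc() above consumes them one by one: a context descriptor only
 * updates the cached context (contextTSE/contextNormal), while data
 * descriptors accumulate payload via e1kAddToFrame() until EOP triggers
 * e1kTransmitFrame().
 */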
4763
4764
4765DECLINLINE(void) e1kUpdateTxContext(E1KSTATE* pState, E1KTXDESC* pDesc)
4766{
4767 if (pDesc->context.dw2.fTSE)
4768 {
4769 pState->contextTSE = pDesc->context;
4770 pState->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4771 pState->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4772 e1kSetupGsoCtx(&pState->GsoCtx, &pDesc->context);
4773 STAM_COUNTER_INC(&pState->StatTxDescCtxTSE);
4774 }
4775 else
4776 {
4777 pState->contextNormal = pDesc->context;
4778 STAM_COUNTER_INC(&pState->StatTxDescCtxNormal);
4779 }
4780 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4781 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", INSTANCE(pState),
4782 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4783 pDesc->context.ip.u8CSS,
4784 pDesc->context.ip.u8CSO,
4785 pDesc->context.ip.u16CSE,
4786 pDesc->context.tu.u8CSS,
4787 pDesc->context.tu.u8CSO,
4788 pDesc->context.tu.u16CSE));
4789}
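/*
 * Worked example (illustrative numbers): a TSE context with HDRLEN=54
 * (Ethernet+IP+TCP headers), MSS=1460 and PAYLEN=2920 describes a packet that
 * is expected to be split into two wire frames of 54+1460 bytes each;
 * u32PayRemain/u16HdrRemain above track how much payload and header remain to
 * be consumed from the subsequent data descriptors.
 */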
4790
4791
4792static bool e1kLocateTxPacket(E1KSTATE *pState)
4793{
4794 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
4795 INSTANCE(pState), pState->cbTxAlloc));
4796 /* Check if we have located the packet already. */
4797 if (pState->cbTxAlloc)
4798 {
4799 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4800 INSTANCE(pState), pState->cbTxAlloc));
4801 return true;
4802 }
4803
4804 bool fTSE = false;
4805 uint32_t cbPacket = 0;
4806
4807 for (int i = pState->iTxDCurrent; i < pState->nTxDFetched; ++i)
4808 {
4809 E1KTXDESC *pDesc = &pState->aTxDescriptors[i];
4810 switch (e1kGetDescType(pDesc))
4811 {
4812 case E1K_DTYP_CONTEXT:
4813 e1kUpdateTxContext(pState, pDesc);
4814 continue;
4815 case E1K_DTYP_LEGACY:
4816 /* Skip empty descriptors. */
4817 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
4818 break;
4819 cbPacket += pDesc->legacy.cmd.u16Length;
4820 pState->fGSO = false;
4821 break;
4822 case E1K_DTYP_DATA:
4823 /* Skip empty descriptors. */
4824 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
4825 break;
4826 if (cbPacket == 0)
4827 {
4828 /*
4829 * The first fragment: save IXSM and TXSM options
4830 * as these are only valid in the first fragment.
4831 */
4832 pState->fIPcsum = pDesc->data.dw3.fIXSM;
4833 pState->fTCPcsum = pDesc->data.dw3.fTXSM;
4834 fTSE = pDesc->data.cmd.fTSE;
4835 /*
4836 * TSE descriptors have VLE bit properly set in
4837 * the first fragment.
4838 */
4839 if (fTSE)
4840 {
4841 pState->fVTag = pDesc->data.cmd.fVLE;
4842 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
4843 }
4844 pState->fGSO = e1kCanDoGso(pState, &pState->GsoCtx, &pDesc->data, &pState->contextTSE);
4845 }
4846 cbPacket += pDesc->data.cmd.u20DTALEN;
4847 break;
4848 default:
4849 AssertMsgFailed(("Impossible descriptor type!"));
4850 }
4851 if (pDesc->legacy.cmd.fEOP)
4852 {
4853 /*
4854 * Non-TSE descriptors have VLE bit properly set in
4855 * the last fragment.
4856 */
4857 if (!fTSE)
4858 {
4859 pState->fVTag = pDesc->data.cmd.fVLE;
4860 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
4861 }
4862 /*
4863 * Compute the required buffer size. If we cannot do GSO but still
4864 * have to do segmentation we allocate the first segment only.
4865             * E.g. (illustrative numbers): cbPacket=2974, HDRLEN=54, MSS=1460 and no GSO
             * give cbTxAlloc = min(2974, 1460 + 54) = 1514, i.e. only the first segment
             * is allocated here and the rest is produced by the fallback segmentation
             * path (plus 4 bytes below if the frame is VLAN-tagged).
             */
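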
4866 pState->cbTxAlloc = (!fTSE || pState->fGSO) ?
4867 cbPacket :
4868 RT_MIN(cbPacket, pState->contextTSE.dw3.u16MSS + pState->contextTSE.dw3.u8HDRLEN);
4869 if (pState->fVTag)
4870 pState->cbTxAlloc += 4;
4871 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4872 INSTANCE(pState), pState->cbTxAlloc));
4873 return true;
4874 }
4875 }
4876
4877 if (cbPacket == 0 && pState->nTxDFetched - pState->iTxDCurrent > 0)
4878 {
4879        /* All descriptors were empty; we need to process them as a dummy packet. */
4880 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
4881 INSTANCE(pState), pState->cbTxAlloc));
4882 return true;
4883 }
4884 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d\n",
4885 INSTANCE(pState), pState->cbTxAlloc));
4886 return false;
4887}
4888
4889
4890static int e1kXmitPacket(E1KSTATE *pState, bool fOnWorkerThread)
4891{
4892 int rc = VINF_SUCCESS;
4893
4894 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
4895 INSTANCE(pState), pState->iTxDCurrent, pState->nTxDFetched));
4896
4897 while (pState->iTxDCurrent < pState->nTxDFetched)
4898 {
4899 E1KTXDESC *pDesc = &pState->aTxDescriptors[pState->iTxDCurrent];
4900 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
4901 INSTANCE(pState), TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
4902 rc = e1kXmitDesc(pState, pDesc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
4903 if (RT_FAILURE(rc))
4904 break;
4905 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
4906 TDH = 0;
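        /*
         * Ring arithmetic sketch (assuming the usual 16-byte TX descriptors): with
         * TDLEN=4096 the ring holds 256 descriptors, so once TDH reaches 256 the
         * check above wraps it back to 0.
         */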
4907 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
4908 if (uLowThreshold != 0 && e1kGetTxLen(pState) <= uLowThreshold)
4909 {
4910 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
4911 INSTANCE(pState), e1kGetTxLen(pState), GET_BITS(TXDCTL, LWTHRESH)*8));
4912 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXD_LOW);
4913 }
4914 ++pState->iTxDCurrent;
4915 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
4916 break;
4917 }
4918
4919 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
4920 INSTANCE(pState), rc, pState->iTxDCurrent, pState->nTxDFetched));
4921 return rc;
4922}
4923#endif /* E1K_WITH_TXD_CACHE */
4924
4925#ifndef E1K_WITH_TXD_CACHE
4926/**
4927 * Transmit pending descriptors.
4928 *
4929 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
4930 *
4931 * @param pState The E1000 state.
4932 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
4933 */
4934static int e1kXmitPending(E1KSTATE *pState, bool fOnWorkerThread)
4935{
4936 int rc = VINF_SUCCESS;
4937
4938 /* Check if transmitter is enabled. */
4939 if (!(TCTL & TCTL_EN))
4940 return VINF_SUCCESS;
4941 /*
4942 * Grab the xmit lock of the driver as well as the E1K device state.
4943 */
4944 rc = e1kCsTxEnter(pState, VERR_SEM_BUSY);
4945 if (RT_LIKELY(rc == VINF_SUCCESS))
4946 {
4947 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
4948 if (pDrv)
4949 {
4950 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
4951 if (RT_FAILURE(rc))
4952 {
4953 e1kCsTxLeave(pState);
4954 return rc;
4955 }
4956 }
4957 /*
4958 * Process all pending descriptors.
4959 * Note! Do not process descriptors in locked state
4960 */
4961 while (TDH != TDT && !pState->fLocked)
4962 {
4963 E1KTXDESC desc;
4964 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
4965 INSTANCE(pState), TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
4966
4967 e1kLoadDesc(pState, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
4968 rc = e1kXmitDesc(pState, &desc, e1kDescAddr(TDBAH, TDBAL, TDH), fOnWorkerThread);
4969 /* If we failed to transmit descriptor we will try it again later */
4970 if (RT_FAILURE(rc))
4971 break;
4972 if (++TDH * sizeof(desc) >= TDLEN)
4973 TDH = 0;
4974
4975 if (e1kGetTxLen(pState) <= GET_BITS(TXDCTL, LWTHRESH)*8)
4976 {
4977 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
4978 INSTANCE(pState), e1kGetTxLen(pState), GET_BITS(TXDCTL, LWTHRESH)*8));
4979 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXD_LOW);
4980 }
4981
4982 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4983 }
4984
4985 /// @todo: uncomment: pState->uStatIntTXQE++;
4986 /// @todo: uncomment: e1kRaiseInterrupt(pState, ICR_TXQE);
4987 /*
4988 * Release the lock.
4989 */
4990 if (pDrv)
4991 pDrv->pfnEndXmit(pDrv);
4992 e1kCsTxLeave(pState);
4993 }
4994
4995 return rc;
4996}
4997#else /* E1K_WITH_TXD_CACHE */
4998static void e1kDumpTxDCache(E1KSTATE *pState)
4999{
5000 unsigned i, cDescs = TDLEN / sizeof(E1KTXDESC);
5001 uint32_t tdh = TDH;
5002 LogRel(("-- Transmit Descriptors (%d total) --\n", cDescs));
5003 for (i = 0; i < cDescs; ++i)
5004 {
5005 E1KTXDESC desc;
5006 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), e1kDescAddr(TDBAH, TDBAL, i),
5007 &desc, sizeof(desc));
5008 if (i == tdh)
5009 LogRel((">>> "));
5010 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc));
5011 }
5012 LogRel(("-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
5013 pState->iTxDCurrent, TDH, pState->nTxDFetched, E1K_TXD_CACHE_SIZE));
5014 if (tdh > pState->iTxDCurrent)
5015 tdh -= pState->iTxDCurrent;
5016 else
5017 tdh = cDescs + tdh - pState->iTxDCurrent;
5018 for (i = 0; i < pState->nTxDFetched; ++i)
5019 {
5020 if (i == pState->iTxDCurrent)
5021 LogRel((">>> "));
5022 LogRel(("%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs), &pState->aTxDescriptors[i]));
5023 }
5024}
5025
5026/**
5027 * Transmit pending descriptors.
5028 *
5029 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
5030 *
5031 * @param pState The E1000 state.
5032 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
5033 */
5034static int e1kXmitPending(E1KSTATE *pState, bool fOnWorkerThread)
5035{
5036 int rc = VINF_SUCCESS;
5037
5038 /* Check if transmitter is enabled. */
5039 if (!(TCTL & TCTL_EN))
5040 return VINF_SUCCESS;
5041 /*
5042 * Grab the xmit lock of the driver as well as the E1K device state.
5043 */
5044 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
5045 if (pDrv)
5046 {
5047 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
5048 if (RT_FAILURE(rc))
5049 return rc;
5050 }
5051
5052 /*
5053 * Process all pending descriptors.
5054 * Note! Do not process descriptors in locked state
5055 */
5056 rc = e1kCsTxEnter(pState, VERR_SEM_BUSY);
5057 if (RT_LIKELY(rc == VINF_SUCCESS))
5058 {
5059 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
5060 /*
5061 * fIncomplete is set whenever we try to fetch additional descriptors
5062         * for an incomplete packet. If we fail to locate a complete packet on
5063         * the next iteration we need to reset the cache or we risk getting
5064         * stuck in this loop forever.
5065 */
5066 bool fIncomplete = false;
5067 while (!pState->fLocked && e1kTxDLazyLoad(pState))
5068 {
5069 while (e1kLocateTxPacket(pState))
5070 {
5071 fIncomplete = false;
5072 /* Found a complete packet, allocate it. */
5073 rc = e1kXmitAllocBuf(pState, pState->fGSO);
5074 /* If we're out of bandwidth we'll come back later. */
5075 if (RT_FAILURE(rc))
5076 goto out;
5077 /* Copy the packet to allocated buffer and send it. */
5078 rc = e1kXmitPacket(pState, fOnWorkerThread);
5079 /* If we're out of bandwidth we'll come back later. */
5080 if (RT_FAILURE(rc))
5081 goto out;
5082 }
5083 uint8_t u8Remain = pState->nTxDFetched - pState->iTxDCurrent;
5084 if (RT_UNLIKELY(fIncomplete))
5085 {
5086 static bool fTxDCacheDumped = false;
5087 /*
5088 * The descriptor cache is full, but we were unable to find
5089 * a complete packet in it. Drop the cache and hope that
5090                 * the guest driver can recover from the network card error.
5091 */
5092 LogRel(("%s No complete packets in%s TxD cache! "
5093 "Fetched=%d, current=%d, TX len=%d.\n",
5094 INSTANCE(pState),
5095 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
5096 pState->nTxDFetched, pState->iTxDCurrent,
5097 e1kGetTxLen(pState)));
5098 if (!fTxDCacheDumped)
5099 {
5100 fTxDCacheDumped = true;
5101 e1kDumpTxDCache(pState);
5102 }
5103 pState->iTxDCurrent = pState->nTxDFetched = 0;
5104 /*
5105 * Returning an error at this point means Guru in R0
5106 * (see @bugref{6428}).
5107 */
5108# ifdef IN_RING3
5109 rc = VERR_NET_INCOMPLETE_TX_PACKET;
5110# else /* !IN_RING3 */
5111 rc = VINF_IOM_R3_IOPORT_WRITE;
5112# endif /* !IN_RING3 */
5113 goto out;
5114 }
5115 if (u8Remain > 0)
5116 {
5117 Log4(("%s Incomplete packet at %d. Already fetched %d, "
5118 "%d more are available\n",
5119 INSTANCE(pState), pState->iTxDCurrent, u8Remain,
5120 e1kGetTxLen(pState) - u8Remain));
5121
5122 /*
5123 * A packet was partially fetched. Move incomplete packet to
5124 * the beginning of cache buffer, then load more descriptors.
5125 */
5126 memmove(pState->aTxDescriptors,
5127 &pState->aTxDescriptors[pState->iTxDCurrent],
5128 u8Remain * sizeof(E1KTXDESC));
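                /* E.g. if 8 descriptors were fetched and the incomplete packet starts
                 * at index 5, the 3 remaining descriptors are moved to indices 0..2
                 * and e1kTxDLoadMore() appends fresh ones after them. */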
5129 pState->iTxDCurrent = 0;
5130 pState->nTxDFetched = u8Remain;
5131 e1kTxDLoadMore(pState);
5132 fIncomplete = true;
5133 }
5134 else
5135 pState->nTxDFetched = 0;
5136 pState->iTxDCurrent = 0;
5137 }
5138 if (!pState->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
5139 {
5140 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
5141 INSTANCE(pState)));
5142 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXD_LOW);
5143 }
5144out:
5145 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
5146
5147 /// @todo: uncomment: pState->uStatIntTXQE++;
5148 /// @todo: uncomment: e1kRaiseInterrupt(pState, ICR_TXQE);
5149
5150 e1kCsTxLeave(pState);
5151 }
5152
5153
5154 /*
5155 * Release the lock.
5156 */
5157 if (pDrv)
5158 pDrv->pfnEndXmit(pDrv);
5159 return rc;
5160}
5161#endif /* E1K_WITH_TXD_CACHE */
5162
5163#ifdef IN_RING3
5164
5165/**
5166 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5167 */
5168static DECLCALLBACK(void) e1kNetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5169{
5170 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5171 /* Resume suspended transmission */
5172 STATUS &= ~STATUS_TXOFF;
5173 e1kXmitPending(pState, true /*fOnWorkerThread*/);
5174}
5175
5176/**
5177 * Callback for consuming from transmit queue. It gets called in R3 whenever
5178 * we enqueue something in R0/GC.
5179 *
5180 * @returns true
5181 * @param pDevIns Pointer to device instance structure.
5182 * @param pItem Pointer to the element being dequeued (not used).
5183 * @thread ???
5184 */
5185static DECLCALLBACK(bool) e1kTxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5186{
5187 NOREF(pItem);
5188 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5189 E1kLog2(("%s e1kTxQueueConsumer:\n", INSTANCE(pState)));
5190
5191 int rc = e1kXmitPending(pState, false /*fOnWorkerThread*/);
5192 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
5193
5194 return true;
5195}
5196
5197/**
5198 * Handler for the wakeup signaller queue.
5199 */
5200static DECLCALLBACK(bool) e1kCanRxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5201{
5202 e1kWakeupReceive(pDevIns);
5203 return true;
5204}
5205
5206#endif /* IN_RING3 */
5207
5208/**
5209 * Write handler for Transmit Descriptor Tail register.
5210 *
5211 * @param pState The device state structure.
5212 * @param offset Register offset in memory-mapped frame.
5213 * @param index Register index in register array.
5214 * @param value The value to store.
5216 * @thread EMT
5217 */
5218static int e1kRegWriteTDT(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5219{
5220 int rc = e1kRegWriteDefault(pState, offset, index, value);
5221
5222 /* All descriptors starting with head and not including tail belong to us. */
5223 /* Process them. */
5224 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5225 INSTANCE(pState), TDBAL, TDBAH, TDLEN, TDH, TDT));
5226
5227 /* Ignore TDT writes when the link is down. */
5228 if (TDH != TDT && (STATUS & STATUS_LU))
5229 {
5230 E1kLogRel(("E1000: TDT write: %d descriptors to process\n", e1kGetTxLen(pState)));
5231 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5232 INSTANCE(pState), e1kGetTxLen(pState)));
5233
5234 /* Transmit pending packets if possible, defer it if we cannot do it
5235 in the current context. */
5236#ifdef E1K_TX_DELAY
5237 rc = e1kCsTxEnter(pState, VERR_SEM_BUSY);
5238 if (RT_LIKELY(rc == VINF_SUCCESS))
5239 {
5240 if (!TMTimerIsActive(pState->CTX_SUFF(pTXDTimer)))
5241 {
5242#ifdef E1K_INT_STATS
5243 pState->u64ArmedAt = RTTimeNanoTS();
5244#endif /* E1K_INT_STATS */
5245 e1kArmTimer(pState, pState->CTX_SUFF(pTXDTimer), E1K_TX_DELAY);
5246 }
5247 E1K_INC_ISTAT_CNT(pState->uStatTxDelayed);
5248 e1kCsTxLeave(pState);
5249 return rc;
5250 }
5251 /* We failed to enter the TX critical section -- transmit as usual. */
5252#endif /* E1K_TX_DELAY */
5253# ifndef IN_RING3
5254 if (!pState->CTX_SUFF(pDrv))
5255 {
5256 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pState->CTX_SUFF(pTxQueue));
5257 if (RT_UNLIKELY(pItem))
5258 PDMQueueInsert(pState->CTX_SUFF(pTxQueue), pItem);
5259 }
5260 else
5261# endif
5262 {
5263 rc = e1kXmitPending(pState, false /*fOnWorkerThread*/);
5264 if (rc == VERR_TRY_AGAIN)
5265 rc = VINF_SUCCESS;
5266 else if (rc == VERR_SEM_BUSY)
5267 rc = VINF_IOM_R3_IOPORT_WRITE;
5268 AssertRC(rc);
5269 }
5270 }
5271
5272 return rc;
5273}
5274
5275/**
5276 * Write handler for Multicast Table Array registers.
5277 *
5278 * @param pState The device state structure.
5279 * @param offset Register offset in memory-mapped frame.
5280 * @param index Register index in register array.
5281 * @param value The value to store.
5282 * @thread EMT
5283 */
5284static int e1kRegWriteMTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5285{
5286 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->auMTA), VERR_DEV_IO_ERROR);
5287 pState->auMTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auMTA[0])] = value;
5288
5289 return VINF_SUCCESS;
5290}
5291
5292/**
5293 * Read handler for Multicast Table Array registers.
5294 *
5295 * @returns VBox status code.
5296 *
5297 * @param pState The device state structure.
5298 * @param offset Register offset in memory-mapped frame.
5299 * @param index Register index in register array.
5300 * @thread EMT
5301 */
5302static int e1kRegReadMTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5303{
5304    AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->auMTA), VERR_DEV_IO_ERROR);
5305 *pu32Value = pState->auMTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auMTA[0])];
5306
5307 return VINF_SUCCESS;
5308}
5309
5310/**
5311 * Write handler for Receive Address registers.
5312 *
5313 * @param pState The device state structure.
5314 * @param offset Register offset in memory-mapped frame.
5315 * @param index Register index in register array.
5316 * @param value The value to store.
5317 * @thread EMT
5318 */
5319static int e1kRegWriteRA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5320{
5321 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->aRecAddr.au32), VERR_DEV_IO_ERROR);
5322 pState->aRecAddr.au32[(offset - s_e1kRegMap[index].offset)/sizeof(pState->aRecAddr.au32[0])] = value;
5323
5324 return VINF_SUCCESS;
5325}
5326
5327/**
5328 * Read handler for Receive Address registers.
5329 *
5330 * @returns VBox status code.
5331 *
5332 * @param pState The device state structure.
5333 * @param offset Register offset in memory-mapped frame.
5334 * @param index Register index in register array.
5335 * @thread EMT
5336 */
5337static int e1kRegReadRA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5338{
5339    AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->aRecAddr.au32), VERR_DEV_IO_ERROR);
5340 *pu32Value = pState->aRecAddr.au32[(offset - s_e1kRegMap[index].offset)/sizeof(pState->aRecAddr.au32[0])];
5341
5342 return VINF_SUCCESS;
5343}
5344
5345/**
5346 * Write handler for VLAN Filter Table Array registers.
5347 *
5348 * @param pState The device state structure.
5349 * @param offset Register offset in memory-mapped frame.
5350 * @param index Register index in register array.
5351 * @param value The value to store.
5352 * @thread EMT
5353 */
5354static int e1kRegWriteVFTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5355{
5356 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->auVFTA), VINF_SUCCESS);
5357 pState->auVFTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auVFTA[0])] = value;
5358
5359 return VINF_SUCCESS;
5360}
5361
5362/**
5363 * Read handler for VLAN Filter Table Array registers.
5364 *
5365 * @returns VBox status code.
5366 *
5367 * @param pState The device state structure.
5368 * @param offset Register offset in memory-mapped frame.
5369 * @param index Register index in register array.
5370 * @thread EMT
5371 */
5372static int e1kRegReadVFTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5373{
5374    AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->auVFTA), VERR_DEV_IO_ERROR);
5375 *pu32Value = pState->auVFTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auVFTA[0])];
5376
5377 return VINF_SUCCESS;
5378}
5379
5380/**
5381 * Read handler for unimplemented registers.
5382 *
5383 * Merely reports reads from unimplemented registers.
5384 *
5385 * @returns VBox status code.
5386 *
5387 * @param pState The device state structure.
5388 * @param offset Register offset in memory-mapped frame.
5389 * @param index Register index in register array.
5390 * @thread EMT
5391 */
5392
5393static int e1kRegReadUnimplemented(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5394{
5395 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5396 INSTANCE(pState), offset, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5397 *pu32Value = 0;
5398
5399 return VINF_SUCCESS;
5400}
5401
5402/**
5403 * Default register read handler with automatic clear operation.
5404 *
5405 * Retrieves the value of the register from the register array in the device
5406 * state structure, then resets all of its bits.
5410 *
5411 * @returns VBox status code.
5412 *
5413 * @param pState The device state structure.
5414 * @param offset Register offset in memory-mapped frame.
5415 * @param index Register index in register array.
5416 * @thread EMT
5417 */
5418
5419static int e1kRegReadAutoClear(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5420{
5421 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5422 int rc = e1kRegReadDefault(pState, offset, index, pu32Value);
5423 pState->auRegs[index] = 0;
5424
5425 return rc;
5426}
5427
5428/**
5429 * Default register read handler.
5430 *
5431 * Retrieves the value of the register from the register array in the device
5432 * state structure. Bits corresponding to 0s in the 'readable' mask always read
 * as 0s.
5436 *
5437 * @returns VBox status code.
5438 *
5439 * @param pState The device state structure.
5440 * @param offset Register offset in memory-mapped frame.
5441 * @param index Register index in register array.
5442 * @thread EMT
5443 */
5444
5445static int e1kRegReadDefault(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5446{
5447 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5448 *pu32Value = pState->auRegs[index] & s_e1kRegMap[index].readable;
5449
5450 return VINF_SUCCESS;
5451}
5452
5453/**
5454 * Write handler for unimplemented registers.
5455 *
5456 * Merely reports writes to unimplemented registers.
5457 *
5458 * @param pState The device state structure.
5459 * @param offset Register offset in memory-mapped frame.
5460 * @param index Register index in register array.
5461 * @param value The value to store.
5462 * @thread EMT
5463 */
5464
5465static int e1kRegWriteUnimplemented(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5466{
5467 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5468 INSTANCE(pState), offset, value, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5469
5470 return VINF_SUCCESS;
5471}
5472
5473/**
5474 * Default register write handler.
5475 *
5476 * Stores the value in the register array in the device state structure. Only bits
5477 * corresponding to 1s in the 'writable' mask are stored; all other bits keep their
 * current values.
5478 *
5479 * @returns VBox status code.
5480 *
5481 * @param pState The device state structure.
5482 * @param offset Register offset in memory-mapped frame.
5483 * @param index Register index in register array.
5484 * @param value The value to store.
5486 * @thread EMT
5487 */
5488
5489static int e1kRegWriteDefault(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5490{
5491 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5492 pState->auRegs[index] = (value & s_e1kRegMap[index].writable) |
5493 (pState->auRegs[index] & ~s_e1kRegMap[index].writable);
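    /* E.g. with writable=0x0000FFFF, a current value of 0xAAAA5555 and an incoming
     * value of 0x12345678 the register becomes 0xAAAA5678: writable bits take the
     * new value, read-only bits keep the old one. */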
5494
5495 return VINF_SUCCESS;
5496}
5497
5498/**
5499 * Search register table for matching register.
5500 *
5501 * @returns Index in the register table or -1 if not found.
5502 *
5503 * @param pState The device state structure.
5504 * @param uOffset Register offset in memory-mapped region.
5505 * @thread EMT
5506 */
5507static int e1kRegLookup(E1KSTATE *pState, uint32_t uOffset)
5508{
5509 int index;
5510
5511 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5512 {
5513 if (s_e1kRegMap[index].offset <= uOffset && uOffset < s_e1kRegMap[index].offset + s_e1kRegMap[index].size)
5514 {
5515 return index;
5516 }
5517 }
5518
5519 return -1;
5520}
5521
5522/**
5523 * Handle register read operation.
5524 *
5525 * Looks up and calls appropriate handler.
5526 *
5527 * @returns VBox status code.
5528 *
5529 * @param pState The device state structure.
5530 * @param uOffset Register offset in memory-mapped frame.
5531 * @param pv Where to store the result.
5532 * @param cb Number of bytes to read.
5533 * @thread EMT
5534 */
5535static int e1kRegRead(E1KSTATE *pState, uint32_t uOffset, void *pv, uint32_t cb)
5536{
5537 uint32_t u32 = 0;
5538 uint32_t mask = 0;
5539 uint32_t shift;
5540 int rc = VINF_SUCCESS;
5541 int index = e1kRegLookup(pState, uOffset);
5542 const char *szInst = INSTANCE(pState);
5543#ifdef DEBUG
5544 char buf[9];
5545#endif
5546
5547 /*
5548 * From the spec:
5549 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5550     * double word) are ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5551 */
5552
5553 /*
5554 * To be able to write bytes and short word we convert them
5555 * to properly shifted 32-bit words and masks. The idea is
5556 * to keep register-specific handlers simple. Most accesses
5557 * will be 32-bit anyway.
5558 */
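    /*
     * E.g. a 2-byte read at register offset + 2 yields mask=0x0000FFFF shifted
     * left by 16 to 0xFFFF0000; the handler below reads the full 32-bit register,
     * the result is ANDed with the mask and shifted right by 16 before being
     * copied back to the caller.
     */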
5559 switch (cb)
5560 {
5561 case 1: mask = 0x000000FF; break;
5562 case 2: mask = 0x0000FFFF; break;
5563 case 4: mask = 0xFFFFFFFF; break;
5564 default:
5565 return PDMDevHlpDBGFStop(pState->CTX_SUFF(pDevIns), RT_SRC_POS,
5566 "%s e1kRegRead: unsupported op size: offset=%#10x cb=%#10x\n",
5567 szInst, uOffset, cb);
5568 }
5569 if (index != -1)
5570 {
5571 if (s_e1kRegMap[index].readable)
5572 {
5573 /* Make the mask correspond to the bits we are about to read. */
5574 shift = (uOffset - s_e1kRegMap[index].offset) % sizeof(uint32_t) * 8;
5575 mask <<= shift;
5576 if (!mask)
5577 return PDMDevHlpDBGFStop(pState->CTX_SUFF(pDevIns), RT_SRC_POS,
5578 "%s e1kRegRead: Zero mask: offset=%#10x cb=%#10x\n",
5579 szInst, uOffset, cb);
5580 /*
5581 * Read it. Pass the mask so the handler knows what has to be read.
5582 * Mask out irrelevant bits.
5583 */
5584 //rc = e1kCsEnter(pState, VERR_SEM_BUSY, RT_SRC_POS);
5585 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5586 return rc;
5587 //pState->fDelayInts = false;
5588 //pState->iStatIntLost += pState->iStatIntLostOne;
5589 //pState->iStatIntLostOne = 0;
5590 rc = s_e1kRegMap[index].pfnRead(pState, uOffset & 0xFFFFFFFC, index, &u32);
5591 u32 &= mask;
5592 //e1kCsLeave(pState);
5593 E1kLog2(("%s At %08X read %s from %s (%s)\n",
5594 szInst, uOffset, e1kU32toHex(u32, mask, buf), s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5595 /* Shift back the result. */
5596 u32 >>= shift;
5597 }
5598 else
5599 {
5600 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
5601 szInst, uOffset, e1kU32toHex(u32, mask, buf), s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5602 }
5603 }
5604 else
5605 {
5606 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
5607 szInst, uOffset, e1kU32toHex(u32, mask, buf)));
5608 }
5609
5610 memcpy(pv, &u32, cb);
5611 return rc;
5612}
5613
5614/**
5615 * Handle register write operation.
5616 *
5617 * Looks up and calls appropriate handler.
5618 *
5619 * @returns VBox status code.
5620 *
5621 * @param pState The device state structure.
5622 * @param uOffset Register offset in memory-mapped frame.
5623 * @param pv Where to fetch the value.
5624 * @param cb Number of bytes to write.
5625 * @thread EMT
5626 */
5627static int e1kRegWrite(E1KSTATE *pState, uint32_t uOffset, void const *pv, unsigned cb)
5628{
5629 int rc = VINF_SUCCESS;
5630 int index = e1kRegLookup(pState, uOffset);
5631 uint32_t u32;
5632
5633 /*
5634 * From the spec:
5635 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5636     * double word) are ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5637 */
5638
5639 if (cb != 4)
5640 {
5641 E1kLog(("%s e1kRegWrite: Spec violation: unsupported op size: offset=%#10x cb=%#10x, ignored.\n",
5642 INSTANCE(pState), uOffset, cb));
5643 return VINF_SUCCESS;
5644 }
5645 if (uOffset & 3)
5646 {
5647 E1kLog(("%s e1kRegWrite: Spec violation: misaligned offset: %#10x cb=%#10x, ignored.\n",
5648 INSTANCE(pState), uOffset, cb));
5649 return VINF_SUCCESS;
5650 }
5651 u32 = *(uint32_t*)pv;
5652 if (index != -1)
5653 {
5654 if (s_e1kRegMap[index].writable)
5655 {
5656 /*
5657 * Write it. Pass the mask so the handler knows what has to be written.
5658 * Mask out irrelevant bits.
5659 */
5660 E1kLog2(("%s At %08X write %08X to %s (%s)\n",
5661 INSTANCE(pState), uOffset, u32, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5662 //rc = e1kCsEnter(pState, VERR_SEM_BUSY, RT_SRC_POS);
5663 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5664 return rc;
5665 //pState->fDelayInts = false;
5666 //pState->iStatIntLost += pState->iStatIntLostOne;
5667 //pState->iStatIntLostOne = 0;
5668 rc = s_e1kRegMap[index].pfnWrite(pState, uOffset, index, u32);
5669 //e1kCsLeave(pState);
5670 }
5671 else
5672 {
5673 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
5674 INSTANCE(pState), uOffset, u32, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5675 }
5676 }
5677 else
5678 {
5679 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
5680 INSTANCE(pState), uOffset, u32));
5681 }
5682 return rc;
5683}
5684
5685/**
5686 * I/O handler for memory-mapped read operations.
5687 *
5688 * @returns VBox status code.
5689 *
5690 * @param pDevIns The device instance.
5691 * @param pvUser User argument.
5692 * @param GCPhysAddr Physical address (in GC) where the read starts.
5693 * @param pv Where to store the result.
5694 * @param cb Number of bytes read.
5695 * @thread EMT
5696 */
5697PDMBOTHCBDECL(int) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser,
5698 RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
5699{
5700 NOREF(pvUser);
5701 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5702 uint32_t uOffset = GCPhysAddr - pState->addrMMReg;
5703 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatMMIORead), a);
5704
5705 Assert(uOffset < E1K_MM_SIZE);
5706
5707 int rc = e1kRegRead(pState, uOffset, pv, cb);
5708 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatMMIORead), a);
5709 return rc;
5710}
5711
5712/**
5713 * Memory mapped I/O Handler for write operations.
5714 *
5715 * @returns VBox status code.
5716 *
5717 * @param pDevIns The device instance.
5718 * @param pvUser User argument.
5719 * @param GCPhysAddr Physical address (in GC) where the write starts.
5720 * @param pv Where to fetch the value.
5721 * @param cb Number of bytes to write.
5722 * @thread EMT
5723 */
5724PDMBOTHCBDECL(int) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser,
5725 RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
5726{
5727 NOREF(pvUser);
5728 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5729 uint32_t uOffset = GCPhysAddr - pState->addrMMReg;
5730 int rc;
5731 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatMMIOWrite), a);
5732
5733 Assert(uOffset < E1K_MM_SIZE);
5734 if (cb != 4)
5735 {
5736        E1kLog(("%s e1kMMIOWrite: invalid op size: offset=%#10x cb=%#10x", INSTANCE(pState), uOffset, cb));
5737 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "e1kMMIOWrite: invalid op size: offset=%#10x cb=%#10x\n", uOffset, cb);
5738 }
5739 else
5740 rc = e1kRegWrite(pState, uOffset, pv, cb);
5741
5742 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatMMIOWrite), a);
5743 return rc;
5744}
5745
5746/**
5747 * Port I/O Handler for IN operations.
5748 *
5749 * @returns VBox status code.
5750 *
5751 * @param pDevIns The device instance.
5752 * @param pvUser Pointer to the device state structure.
5753 * @param port Port number used for the IN operation.
5754 * @param pu32 Where to store the result.
5755 * @param cb Number of bytes read.
5756 * @thread EMT
5757 */
5758PDMBOTHCBDECL(int) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser,
5759 RTIOPORT port, uint32_t *pu32, unsigned cb)
5760{
5761 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5762 int rc = VINF_SUCCESS;
5763 const char *szInst = INSTANCE(pState);
5764 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatIORead), a);
5765
5766 port -= pState->addrIOPort;
5767 if (cb != 4)
5768 {
5769 E1kLog(("%s e1kIOPortIn: invalid op size: port=%RTiop cb=%08x", szInst, port, cb));
5770 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: port=%RTiop cb=%08x\n", szInst, port, cb);
5771 }
5772 else
5773 switch (port)
5774 {
5775 case 0x00: /* IOADDR */
5776 *pu32 = pState->uSelectedReg;
5777 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", szInst, pState->uSelectedReg, *pu32));
5778 break;
5779 case 0x04: /* IODATA */
5780 rc = e1kRegRead(pState, pState->uSelectedReg, pu32, cb);
5781 /** @todo wrong return code triggers assertions in the debug build; fix please */
5782 if (rc == VINF_IOM_R3_MMIO_READ)
5783 rc = VINF_IOM_R3_IOPORT_READ;
5784
5785 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", szInst, pState->uSelectedReg, *pu32));
5786 break;
5787 default:
5788 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", szInst, port));
5789 //*pRC = VERR_IOM_IOPORT_UNUSED;
5790 }
5791
5792 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatIORead), a);
5793 return rc;
5794}
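/*
 * Usage sketch (illustrative pseudo-code, register offset is only an example):
 * the I/O space exposes a two-register window. To access a register indirectly
 * the guest first writes the register's MMIO offset to IOADDR (port base + 0)
 * and then reads or writes IODATA (port base + 4):
 *
 *   outl(iobase + 0, 0x00008);   // select a register by its offset
 *   val = inl(iobase + 4);       // read the selected register
 *
 * e1kIOPortIn() above and e1kIOPortOut() below implement exactly this dispatch
 * via pState->uSelectedReg.
 */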
5795
5796
5797/**
5798 * Port I/O Handler for OUT operations.
5799 *
5800 * @returns VBox status code.
5801 *
5802 * @param pDevIns The device instance.
5803 * @param pvUser User argument.
5804 * @param port Port number used for the OUT operation.
5805 * @param u32 The value to output.
5806 * @param cb The value size in bytes.
5807 * @thread EMT
5808 */
5809PDMBOTHCBDECL(int) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser,
5810 RTIOPORT port, uint32_t u32, unsigned cb)
5811{
5812 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5813 int rc = VINF_SUCCESS;
5814 const char *szInst = INSTANCE(pState);
5815 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatIOWrite), a);
5816
5817 E1kLog2(("%s e1kIOPortOut: port=%RTiop value=%08x\n", szInst, port, u32));
5818 if (cb != 4)
5819 {
5820 E1kLog(("%s e1kIOPortOut: invalid op size: port=%RTiop cb=%08x\n", szInst, port, cb));
5821 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortOut: invalid op size: port=%RTiop cb=%08x\n", szInst, port, cb);
5822 }
5823 else
5824 {
5825 port -= pState->addrIOPort;
5826 switch (port)
5827 {
5828 case 0x00: /* IOADDR */
5829 pState->uSelectedReg = u32;
5830 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", szInst, pState->uSelectedReg));
5831 break;
5832 case 0x04: /* IODATA */
5833 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", szInst, pState->uSelectedReg, u32));
5834 rc = e1kRegWrite(pState, pState->uSelectedReg, &u32, cb);
5835 /** @todo wrong return code triggers assertions in the debug build; fix please */
5836 if (rc == VINF_IOM_R3_MMIO_WRITE)
5837 rc = VINF_IOM_R3_IOPORT_WRITE;
5838 break;
5839 default:
5840 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", szInst, port));
5841 /** @todo Do we need to return an error here?
5842 * bird: VINF_SUCCESS is fine for unhandled cases of an OUT handler. (If you're curious
5843 * about the guest code and a bit adventuresome, try rc = PDMDeviceDBGFStop(...);) */
5844 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "e1kIOPortOut: invalid port %#010x\n", port);
5845 }
5846 }
5847
5848 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatIOWrite), a);
5849 return rc;
5850}
5851
5852#ifdef IN_RING3
5853/**
5854 * Dump complete device state to log.
5855 *
5856 * @param pState Pointer to device state.
5857 */
5858static void e1kDumpState(E1KSTATE *pState)
5859{
5860    for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
5861 {
5862 E1kLog2(("%s %8.8s = %08x\n", INSTANCE(pState),
5863 s_e1kRegMap[i].abbrev, pState->auRegs[i]));
5864 }
5865#ifdef E1K_INT_STATS
5866 LogRel(("%s Interrupt attempts: %d\n", INSTANCE(pState), pState->uStatIntTry));
5867 LogRel(("%s Interrupts raised : %d\n", INSTANCE(pState), pState->uStatInt));
5868 LogRel(("%s Interrupts lowered: %d\n", INSTANCE(pState), pState->uStatIntLower));
5869 LogRel(("%s Interrupts delayed: %d\n", INSTANCE(pState), pState->uStatIntDly));
5870 LogRel(("%s Disabled delayed: %d\n", INSTANCE(pState), pState->uStatDisDly));
5871 LogRel(("%s Interrupts skipped: %d\n", INSTANCE(pState), pState->uStatIntSkip));
5872 LogRel(("%s Masked interrupts : %d\n", INSTANCE(pState), pState->uStatIntMasked));
5873 LogRel(("%s Early interrupts : %d\n", INSTANCE(pState), pState->uStatIntEarly));
5874 LogRel(("%s Late interrupts : %d\n", INSTANCE(pState), pState->uStatIntLate));
5875 LogRel(("%s Lost interrupts : %d\n", INSTANCE(pState), pState->iStatIntLost));
5876 LogRel(("%s Interrupts by RX : %d\n", INSTANCE(pState), pState->uStatIntRx));
5877 LogRel(("%s Interrupts by TX : %d\n", INSTANCE(pState), pState->uStatIntTx));
5878 LogRel(("%s Interrupts by ICS : %d\n", INSTANCE(pState), pState->uStatIntICS));
5879 LogRel(("%s Interrupts by RDTR: %d\n", INSTANCE(pState), pState->uStatIntRDTR));
5880 LogRel(("%s Interrupts by RDMT: %d\n", INSTANCE(pState), pState->uStatIntRXDMT0));
5881 LogRel(("%s Interrupts by TXQE: %d\n", INSTANCE(pState), pState->uStatIntTXQE));
5882 LogRel(("%s TX int delay asked: %d\n", INSTANCE(pState), pState->uStatTxIDE));
5883 LogRel(("%s TX delayed: %d\n", INSTANCE(pState), pState->uStatTxDelayed));
5884 LogRel(("%s TX delay expired: %d\n", INSTANCE(pState), pState->uStatTxDelayExp));
5885 LogRel(("%s TX no report asked: %d\n", INSTANCE(pState), pState->uStatTxNoRS));
5886 LogRel(("%s TX abs timer expd : %d\n", INSTANCE(pState), pState->uStatTAD));
5887 LogRel(("%s TX int timer expd : %d\n", INSTANCE(pState), pState->uStatTID));
5888 LogRel(("%s RX abs timer expd : %d\n", INSTANCE(pState), pState->uStatRAD));
5889 LogRel(("%s RX int timer expd : %d\n", INSTANCE(pState), pState->uStatRID));
5890 LogRel(("%s TX CTX descriptors: %d\n", INSTANCE(pState), pState->uStatDescCtx));
5891 LogRel(("%s TX DAT descriptors: %d\n", INSTANCE(pState), pState->uStatDescDat));
5892 LogRel(("%s TX LEG descriptors: %d\n", INSTANCE(pState), pState->uStatDescLeg));
5893 LogRel(("%s Received frames : %d\n", INSTANCE(pState), pState->uStatRxFrm));
5894 LogRel(("%s Transmitted frames: %d\n", INSTANCE(pState), pState->uStatTxFrm));
5895 LogRel(("%s TX frames up to 1514: %d\n", INSTANCE(pState), pState->uStatTx1514));
5896 LogRel(("%s TX frames up to 2962: %d\n", INSTANCE(pState), pState->uStatTx2962));
5897 LogRel(("%s TX frames up to 4410: %d\n", INSTANCE(pState), pState->uStatTx4410));
5898 LogRel(("%s TX frames up to 5858: %d\n", INSTANCE(pState), pState->uStatTx5858));
5899 LogRel(("%s TX frames up to 7306: %d\n", INSTANCE(pState), pState->uStatTx7306));
5900 LogRel(("%s TX frames up to 8754: %d\n", INSTANCE(pState), pState->uStatTx8754));
5901 LogRel(("%s TX frames up to 16384: %d\n", INSTANCE(pState), pState->uStatTx16384));
5902 LogRel(("%s TX frames up to 32768: %d\n", INSTANCE(pState), pState->uStatTx32768));
5903 LogRel(("%s Larger TX frames : %d\n", INSTANCE(pState), pState->uStatTxLarge));
5904 LogRel(("%s Max TX Delay : %lld\n", INSTANCE(pState), pState->uStatMaxTxDelay));
5905#endif /* E1K_INT_STATS */
5906}
5907
5908/**
5909 * Map PCI I/O region.
5910 *
5911 * @return VBox status code.
5912 * @param pPciDev Pointer to PCI device. Use pPciDev->pDevIns to get the device instance.
5913 * @param iRegion The region number.
5914 * @param GCPhysAddress Physical address of the region. If iType is PCI_ADDRESS_SPACE_IO, this is an
5915 * I/O port, else it's a physical address.
5916 * This address is *NOT* relative to pci_mem_base like earlier!
5917 * @param cb Region size.
5918 * @param enmType One of the PCI_ADDRESS_SPACE_* values.
5919 * @thread EMT
5920 */
5921static DECLCALLBACK(int) e1kMap(PPCIDEVICE pPciDev, int iRegion,
5922 RTGCPHYS GCPhysAddress, uint32_t cb, PCIADDRESSSPACE enmType)
5923{
5924 int rc;
5925 E1KSTATE *pState = PDMINS_2_DATA(pPciDev->pDevIns, E1KSTATE*);
5926
5927 switch (enmType)
5928 {
5929 case PCI_ADDRESS_SPACE_IO:
5930 pState->addrIOPort = (RTIOPORT)GCPhysAddress;
5931 rc = PDMDevHlpIOPortRegister(pPciDev->pDevIns, pState->addrIOPort, cb, 0,
5932 e1kIOPortOut, e1kIOPortIn, NULL, NULL, "E1000");
5933 if (RT_FAILURE(rc))
5934 break;
5935 if (pState->fR0Enabled)
5936 {
5937 rc = PDMDevHlpIOPortRegisterR0(pPciDev->pDevIns, pState->addrIOPort, cb, 0,
5938 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
5939 if (RT_FAILURE(rc))
5940 break;
5941 }
5942 if (pState->fGCEnabled)
5943 {
5944 rc = PDMDevHlpIOPortRegisterRC(pPciDev->pDevIns, pState->addrIOPort, cb, 0,
5945 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
5946 }
5947 break;
5948 case PCI_ADDRESS_SPACE_MEM:
5949 pState->addrMMReg = GCPhysAddress;
5950 rc = PDMDevHlpMMIORegister(pPciDev->pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
5951 IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
5952 e1kMMIOWrite, e1kMMIORead, "E1000");
5953 if (pState->fR0Enabled)
5954 {
5955 rc = PDMDevHlpMMIORegisterR0(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTR0PTR /*pvUser*/,
5956 "e1kMMIOWrite", "e1kMMIORead");
5957 if (RT_FAILURE(rc))
5958 break;
5959 }
5960 if (pState->fGCEnabled)
5961 {
5962 rc = PDMDevHlpMMIORegisterRC(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
5963 "e1kMMIOWrite", "e1kMMIORead");
5964 }
5965 break;
5966 default:
5967 /* We should never get here */
5968 AssertMsgFailed(("Invalid PCI address space param in map callback"));
5969 rc = VERR_INTERNAL_ERROR;
5970 break;
5971 }
5972 return rc;
5973}
5974
5975/**
5976 * Check if the device can receive data now.
5977 * This must be called before the pfnReceive() method is called.
5978 *
5979 * @returns VBox status code: VINF_SUCCESS if receive buffer space is available,
 *          VERR_NET_NO_BUFFER_SPACE otherwise.
5980 * @param pState The device state structure.
5981 * @thread EMT
5982 */
5983static int e1kCanReceive(E1KSTATE *pState)
5984{
5985#ifndef E1K_WITH_RXD_CACHE
5986 size_t cb;
5987
5988 if (RT_UNLIKELY(e1kCsRxEnter(pState, VERR_SEM_BUSY) != VINF_SUCCESS))
5989 return VERR_NET_NO_BUFFER_SPACE;
5990
5991 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
5992 {
5993 E1KRXDESC desc;
5994 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
5995 &desc, sizeof(desc));
5996 if (desc.status.fDD)
5997 cb = 0;
5998 else
5999 cb = pState->u16RxBSize;
6000 }
6001 else if (RDH < RDT)
6002 cb = (RDT - RDH) * pState->u16RxBSize;
6003 else if (RDH > RDT)
6004 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pState->u16RxBSize;
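        /* E.g. with a 256-descriptor ring, RDH=250 and RDT=10 this yields
         * (256 - 250 + 10) = 16 free descriptors worth of buffer space. */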
6005 else
6006 {
6007 cb = 0;
6008 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
6009 }
6010 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
6011 INSTANCE(pState), RDH, RDT, RDLEN, pState->u16RxBSize, cb));
6012
6013 e1kCsRxLeave(pState);
6014 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
6015#else /* E1K_WITH_RXD_CACHE */
6016 int rc = VINF_SUCCESS;
6017
6018 if (RT_UNLIKELY(e1kCsRxEnter(pState, VERR_SEM_BUSY) != VINF_SUCCESS))
6019 return VERR_NET_NO_BUFFER_SPACE;
6020
6021 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
6022 {
6023 E1KRXDESC desc;
6024 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
6025 &desc, sizeof(desc));
6026 if (desc.status.fDD)
6027 rc = VERR_NET_NO_BUFFER_SPACE;
6028 }
6029 else if (e1kRxDIsCacheEmpty(pState) && RDH == RDT)
6030 {
6031 /* Cache is empty, so is the RX ring. */
6032 rc = VERR_NET_NO_BUFFER_SPACE;
6033 }
6034 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
6035 " u16RxBSize=%d rc=%Rrc\n", INSTANCE(pState),
6036 e1kRxDInCache(pState), RDH, RDT, RDLEN, pState->u16RxBSize, rc));
6037
6038 e1kCsRxLeave(pState);
6039 return rc;
6040#endif /* E1K_WITH_RXD_CACHE */
6041}
6042
6043/**
6044 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
6045 */
6046static DECLCALLBACK(int) e1kNetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
6047{
6048 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6049 int rc = e1kCanReceive(pState);
6050
6051 if (RT_SUCCESS(rc))
6052 return VINF_SUCCESS;
6053 if (RT_UNLIKELY(cMillies == 0))
6054 return VERR_NET_NO_BUFFER_SPACE;
6055
6056 rc = VERR_INTERRUPTED;
6057 ASMAtomicXchgBool(&pState->fMaybeOutOfSpace, true);
6058 STAM_PROFILE_START(&pState->StatRxOverflow, a);
6059 VMSTATE enmVMState;
6060 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pState->CTX_SUFF(pDevIns))) == VMSTATE_RUNNING
6061 || enmVMState == VMSTATE_RUNNING_LS))
6062 {
6063 int rc2 = e1kCanReceive(pState);
6064 if (RT_SUCCESS(rc2))
6065 {
6066 rc = VINF_SUCCESS;
6067 break;
6068 }
6069 E1kLogRel(("E1000 e1kNetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n",
6070 cMillies));
6071 E1kLog(("%s e1kNetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n",
6072 INSTANCE(pState), cMillies));
6073 RTSemEventWait(pState->hEventMoreRxDescAvail, cMillies);
6074 }
6075 STAM_PROFILE_STOP(&pState->StatRxOverflow, a);
6076 ASMAtomicXchgBool(&pState->fMaybeOutOfSpace, false);
6077
6078 return rc;
6079}
6080
6081
6082/**
6083 * Matches the packet addresses against Receive Address table. Looks for
6084 * exact matches only.
6085 *
6086 * @returns true if address matches.
6087 * @param pState Pointer to the state structure.
6088 * @param pvBuf The ethernet packet.
6090 * @thread EMT
6091 */
6092static bool e1kPerfectMatch(E1KSTATE *pState, const void *pvBuf)
6093{
6094 for (unsigned i = 0; i < RT_ELEMENTS(pState->aRecAddr.array); i++)
6095 {
6096 E1KRAELEM* ra = pState->aRecAddr.array + i;
6097
6098 /* Valid address? */
6099 if (ra->ctl & RA_CTL_AV)
6100 {
6101 Assert((ra->ctl & RA_CTL_AS) < 2);
6102 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
6103 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
6104 // INSTANCE(pState), pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
6105 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
6106 /*
6107 * Address Select:
6108 * 00b = Destination address
6109 * 01b = Source address
6110 * 10b = Reserved
6111 * 11b = Reserved
6112 * Since ethernet header is (DA, SA, len) we can use address
6113 * select as index.
6114 */
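            /* E.g. (assuming the usual 6-byte address entries) AS=00b compares the
             * first 6 bytes of the frame (destination MAC) with ra->addr, while
             * AS=01b compares bytes 6..11 (source MAC). */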
6115 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
6116 ra->addr, sizeof(ra->addr)) == 0)
6117 return true;
6118 }
6119 }
6120
6121 return false;
6122}
6123
6124/**
6125 * Matches the packet addresses against Multicast Table Array.
6126 *
6127 * @remarks This is an imperfect match since it matches a subset of addresses
6128 *          rather than an exact address.
6129 *
6130 * @returns true if address matches.
6131 * @param pState Pointer to the state structure.
6132 * @param pvBuf The ethernet packet.
6134 * @thread EMT
6135 */
6136static bool e1kImperfectMatch(E1KSTATE *pState, const void *pvBuf)
6137{
6138 /* Get bits 32..47 of destination address */
6139 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
6140
6141 unsigned offset = GET_BITS(RCTL, MO);
6142 /*
6143 * offset means:
6144 * 00b = bits 36..47
6145 * 01b = bits 35..46
6146 * 10b = bits 34..45
6147 * 11b = bits 32..43
6148 */
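    /* E.g. with RCTL.MO=00b the value is shifted right by 4 bits, so the 12-bit
     * index into the 4096-bit MTA is taken from bits 36..47 of the destination
     * address. */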
6149 if (offset < 3)
6150 u16Bit = u16Bit >> (4 - offset);
6151 return ASMBitTest(pState->auMTA, u16Bit & 0xFFF);
6152}
6153
6154/**
6155 * Determines if the packet is to be delivered to the upper layer. The following
6156 * filters are supported:
6157 * - Exact Unicast/Multicast
6158 * - Promiscuous Unicast/Multicast
6159 * - Multicast
6160 * - VLAN
6161 *
6162 * @returns true if packet is intended for this node.
6163 * @param pState Pointer to the state structure.
6164 * @param pvBuf The ethernet packet.
6165 * @param cb Number of bytes available in the packet.
6166 * @param pStatus Bit field to store status bits.
6167 * @thread EMT
6168 */
6169static bool e1kAddressFilter(E1KSTATE *pState, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
6170{
6171 Assert(cb > 14);
6172 /* Assume that we fail to pass exact filter. */
6173 pStatus->fPIF = false;
6174 pStatus->fVP = false;
6175 /* Discard oversized packets */
6176 if (cb > E1K_MAX_RX_PKT_SIZE)
6177 {
6178 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
6179 INSTANCE(pState), cb, E1K_MAX_RX_PKT_SIZE));
6180 E1K_INC_CNT32(ROC);
6181 return false;
6182 }
6183 else if (!(RCTL & RCTL_LPE) && cb > 1522)
6184 {
6185 /* When long packet reception is disabled, packets over 1522 bytes are discarded */
6186 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
6187 INSTANCE(pState), cb));
6188 E1K_INC_CNT32(ROC);
6189 return false;
6190 }
6191
6192 uint16_t *u16Ptr = (uint16_t*)pvBuf;
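    /* u16Ptr[6] is bytes 12-13 of the frame: the EtherType, or the TPID if the frame carries an 802.1Q tag (u16Ptr[7] is then the TCI). */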
6193 /* Compare TPID with VLAN Ether Type */
6194 if (RT_BE2H_U16(u16Ptr[6]) == VET)
6195 {
6196 pStatus->fVP = true;
6197 /* Is VLAN filtering enabled? */
6198 if (RCTL & RCTL_VFE)
6199 {
6200 /* It is indeed an 802.1q packet, let's filter by VID */
6201 if (RCTL & RCTL_CFIEN)
6202 {
6203 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", INSTANCE(pState),
6204 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6205 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6206 !!(RCTL & RCTL_CFI)));
6207 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6208 {
6209 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6210 INSTANCE(pState), E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6211 return false;
6212 }
6213 }
6214 else
6215 E1kLog3(("%s VLAN filter: VLAN=%d\n", INSTANCE(pState),
6216 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6217 if (!ASMBitTest(pState->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6218 {
6219 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6220 INSTANCE(pState), E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6221 return false;
6222 }
6223 }
6224 }
6225 /* Broadcast filtering */
6226 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6227 return true;
6228 E1kLog2(("%s Packet filter: not a broadcast\n", INSTANCE(pState)));
6229 if (e1kIsMulticast(pvBuf))
6230 {
6231 /* Is multicast promiscuous enabled? */
6232 if (RCTL & RCTL_MPE)
6233 return true;
6234 E1kLog2(("%s Packet filter: no promiscuous multicast\n", INSTANCE(pState)));
6235 /* Try perfect matches first */
6236 if (e1kPerfectMatch(pState, pvBuf))
6237 {
6238 pStatus->fPIF = true;
6239 return true;
6240 }
6241 E1kLog2(("%s Packet filter: no perfect match\n", INSTANCE(pState)));
6242 if (e1kImperfectMatch(pState, pvBuf))
6243 return true;
6244 E1kLog2(("%s Packet filter: no imperfect match\n", INSTANCE(pState)));
6245 }
6246 else {
6247 /* Is unicast promiscuous enabled? */
6248 if (RCTL & RCTL_UPE)
6249 return true;
6250 E1kLog2(("%s Packet filter: no promiscuous unicast\n", INSTANCE(pState)));
6251 if (e1kPerfectMatch(pState, pvBuf))
6252 {
6253 pStatus->fPIF = true;
6254 return true;
6255 }
6256 E1kLog2(("%s Packet filter: no perfect match\n", INSTANCE(pState)));
6257 }
6258 E1kLog2(("%s Packet filter: packet discarded\n", INSTANCE(pState)));
6259 return false;
6260}
6261
6262/**
6263 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6264 */
6265static DECLCALLBACK(int) e1kNetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6266{
6267 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6268 int rc = VINF_SUCCESS;
6269
6270 /*
6271 * Drop packets if the VM is not running yet/anymore.
6272 */
6273 VMSTATE enmVMState = PDMDevHlpVMState(STATE_TO_DEVINS(pState));
6274 if ( enmVMState != VMSTATE_RUNNING
6275 && enmVMState != VMSTATE_RUNNING_LS)
6276 {
6277 E1kLog(("%s Dropping incoming packet as VM is not running.\n", INSTANCE(pState)));
6278 return VINF_SUCCESS;
6279 }
6280
6281 /* Discard incoming packets in locked state */
6282 if (!(RCTL & RCTL_EN) || pState->fLocked || !(STATUS & STATUS_LU))
6283 {
6284 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", INSTANCE(pState)));
6285 return VINF_SUCCESS;
6286 }
6287
6288 STAM_PROFILE_ADV_START(&pState->StatReceive, a);
6289
6290 //if (!e1kCsEnter(pState, RT_SRC_POS))
6291 // return VERR_PERMISSION_DENIED;
6292
6293 e1kPacketDump(pState, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6294
6295 /* Update stats */
6296 if (RT_LIKELY(e1kCsEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
6297 {
6298 E1K_INC_CNT32(TPR);
6299 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6300 e1kCsLeave(pState);
6301 }
6302 STAM_PROFILE_ADV_START(&pState->StatReceiveFilter, a);
6303 E1KRXDST status;
6304 RT_ZERO(status);
6305 bool fPassed = e1kAddressFilter(pState, pvBuf, cb, &status);
6306 STAM_PROFILE_ADV_STOP(&pState->StatReceiveFilter, a);
6307 if (fPassed)
6308 {
6309 rc = e1kHandleRxPacket(pState, pvBuf, cb, status);
6310 }
6311 //e1kCsLeave(pState);
6312 STAM_PROFILE_ADV_STOP(&pState->StatReceive, a);
6313
6314 return rc;
6315}
6316
6317/**
6318 * Gets the pointer to the status LED of a unit.
6319 *
6320 * @returns VBox status code.
6321 * @param pInterface Pointer to the interface structure.
6322 * @param iLUN The unit whose status LED we desire.
6323 * @param ppLed Where to store the LED pointer.
6324 * @thread EMT
6325 */
6326static DECLCALLBACK(int) e1kQueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6327{
6328 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, ILeds);
6329 int rc = VERR_PDM_LUN_NOT_FOUND;
6330
6331 if (iLUN == 0)
6332 {
6333 *ppLed = &pState->led;
6334 rc = VINF_SUCCESS;
6335 }
6336 return rc;
6337}
6338
6339/**
6340 * Gets the current Media Access Control (MAC) address.
6341 *
6342 * @returns VBox status code.
6343 * @param pInterface Pointer to the interface structure containing the called function pointer.
6344 * @param pMac Where to store the MAC address.
6345 * @thread EMT
6346 */
6347static DECLCALLBACK(int) e1kGetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6348{
6349 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6350 pState->eeprom.getMac(pMac);
6351 return VINF_SUCCESS;
6352}
6353
6354
6355/**
6356 * Gets the current link state.
6357 *
6358 * @returns The current link state.
6359 * @param pInterface Pointer to the interface structure containing the called function pointer.
6360 * @thread EMT
6361 */
6362static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kGetLinkState(PPDMINETWORKCONFIG pInterface)
6363{
6364 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6365 if (STATUS & STATUS_LU)
6366 return PDMNETWORKLINKSTATE_UP;
6367 return PDMNETWORKLINKSTATE_DOWN;
6368}
6369
6370
6371/**
6372 * Sets the new link state.
6373 *
6374 * @returns VBox status code.
6375 * @param pInterface Pointer to the interface structure containing the called function pointer.
6376 * @param enmState The new link state
6377 * @thread EMT
6378 */
6379static DECLCALLBACK(int) e1kSetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6380{
6381 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6382 bool fOldUp = !!(STATUS & STATUS_LU);
6383 bool fNewUp = enmState == PDMNETWORKLINKSTATE_UP;
6384
6385 if ( fNewUp != fOldUp
6386 || (!fNewUp && pState->fCableConnected)) /* old state was connected but STATUS not
6387 * yet written by guest */
6388 {
6389 if (fNewUp)
6390 {
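            /* Even though the new state is 'up', report the link as down first; the link-up
             * timer armed by e1kBringLinkUpDelayed() below restores it after cMsLinkUpDelay ms
             * so the guest notices the change. */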
6391 E1kLog(("%s Link will be up in approximately %d secs\n",
6392 INSTANCE(pState), pState->cMsLinkUpDelay / 1000));
6393 pState->fCableConnected = true;
6394 STATUS &= ~STATUS_LU;
6395 Phy::setLinkStatus(&pState->phy, false);
6396 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
6397 /* Restore the link back in 5 seconds (by default). */
6398 e1kBringLinkUpDelayed(pState);
6399 }
6400 else
6401 {
6402 E1kLog(("%s Link is down\n", INSTANCE(pState)));
6403 pState->fCableConnected = false;
6404 STATUS &= ~STATUS_LU;
6405 Phy::setLinkStatus(&pState->phy, false);
6406 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
6407 }
6408 if (pState->pDrvR3)
6409 pState->pDrvR3->pfnNotifyLinkChanged(pState->pDrvR3, enmState);
6410 }
6411 return VINF_SUCCESS;
6412}
6413
6414/**
6415 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6416 */
6417static DECLCALLBACK(void *) e1kQueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6418{
6419 E1KSTATE *pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, IBase);
6420 Assert(&pThis->IBase == pInterface);
6421
6422 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
6423 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThis->INetworkDown);
6424 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThis->INetworkConfig);
6425 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
6426 return NULL;
6427}
6428
6429/**
6430 * Saves the configuration.
6431 *
6432 * @param pState The E1K state.
6433 * @param pSSM The handle to the saved state.
6434 */
6435static void e1kSaveConfig(E1KSTATE *pState, PSSMHANDLE pSSM)
6436{
6437 SSMR3PutMem(pSSM, &pState->macConfigured, sizeof(pState->macConfigured));
6438 SSMR3PutU32(pSSM, pState->eChip);
6439}
6440
6441/**
6442 * Live save - save basic configuration.
6443 *
6444 * @returns VBox status code.
6445 * @param pDevIns The device instance.
6446 * @param pSSM The handle to the saved state.
6447 * @param uPass The data pass.
6448 */
6449static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6450{
6451 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6452 e1kSaveConfig(pState, pSSM);
6453 return VINF_SSM_DONT_CALL_AGAIN;
6454}
6455
6456/**
6457 * Prepares for state saving.
6458 *
6459 * @returns VBox status code.
6460 * @param pDevIns The device instance.
6461 * @param pSSM The handle to the saved state.
6462 */
6463static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6464{
6465 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6466
6467 int rc = e1kCsEnter(pState, VERR_SEM_BUSY);
6468 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6469 return rc;
6470 e1kCsLeave(pState);
6471 return VINF_SUCCESS;
6472#if 0
6473 /* 1) Prevent all threads from modifying the state and memory */
6474 //pState->fLocked = true;
6475 /* 2) Cancel all timers */
6476#ifdef E1K_TX_DELAY
6477 e1kCancelTimer(pState, pState->CTX_SUFF(pTXDTimer));
6478#endif /* E1K_TX_DELAY */
6479#ifdef E1K_USE_TX_TIMERS
6480 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
6481#ifndef E1K_NO_TAD
6482 e1kCancelTimer(pState, pState->CTX_SUFF(pTADTimer));
6483#endif /* E1K_NO_TAD */
6484#endif /* E1K_USE_TX_TIMERS */
6485#ifdef E1K_USE_RX_TIMERS
6486 e1kCancelTimer(pState, pState->CTX_SUFF(pRIDTimer));
6487 e1kCancelTimer(pState, pState->CTX_SUFF(pRADTimer));
6488#endif /* E1K_USE_RX_TIMERS */
6489 e1kCancelTimer(pState, pState->CTX_SUFF(pIntTimer));
6490 /* 3) Did I forget anything? */
6491 E1kLog(("%s Locked\n", INSTANCE(pState)));
6492 return VINF_SUCCESS;
6493#endif
6494}
6495
6496
6497/**
6498 * Saves the state of the device.
6499 *
6500 * @returns VBox status code.
6501 * @param pDevIns The device instance.
6502 * @param pSSM The handle to the saved state.
6503 */
6504static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6505{
6506 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6507
6508 e1kSaveConfig(pState, pSSM);
6509 pState->eeprom.save(pSSM);
6510 e1kDumpState(pState);
6511 SSMR3PutMem(pSSM, pState->auRegs, sizeof(pState->auRegs));
6512 SSMR3PutBool(pSSM, pState->fIntRaised);
6513 Phy::saveState(pSSM, &pState->phy);
6514 SSMR3PutU32(pSSM, pState->uSelectedReg);
6515 SSMR3PutMem(pSSM, pState->auMTA, sizeof(pState->auMTA));
6516 SSMR3PutMem(pSSM, &pState->aRecAddr, sizeof(pState->aRecAddr));
6517 SSMR3PutMem(pSSM, pState->auVFTA, sizeof(pState->auVFTA));
6518 SSMR3PutU64(pSSM, pState->u64AckedAt);
6519 SSMR3PutU16(pSSM, pState->u16RxBSize);
6520 //SSMR3PutBool(pSSM, pState->fDelayInts);
6521 //SSMR3PutBool(pSSM, pState->fIntMaskUsed);
6522 SSMR3PutU16(pSSM, pState->u16TxPktLen);
6523/** @todo State wrt the TSE buffer is incomplete, so there is little point in
6524 * saving this actually. */
6525 SSMR3PutMem(pSSM, pState->aTxPacketFallback, pState->u16TxPktLen);
6526 SSMR3PutBool(pSSM, pState->fIPcsum);
6527 SSMR3PutBool(pSSM, pState->fTCPcsum);
6528 SSMR3PutMem(pSSM, &pState->contextTSE, sizeof(pState->contextTSE));
6529 SSMR3PutMem(pSSM, &pState->contextNormal, sizeof(pState->contextNormal));
6530 SSMR3PutBool(pSSM, pState->fVTag);
6531 SSMR3PutU16(pSSM, pState->u16VTagTCI);
6532#ifdef E1K_WITH_TXD_CACHE
6533#if 0
6534 SSMR3PutU8(pSSM, pState->nTxDFetched);
6535 SSMR3PutMem(pSSM, pState->aTxDescriptors,
6536 pState->nTxDFetched * sizeof(pState->aTxDescriptors[0]));
6537#else
6538 /*
6539 * There is no point in storing TX descriptor cache entries as we can simply
6540 * fetch them again. Moreover, normally the cache is always empty when we
6541 * save the state. Store zero entries for compatibility.
6542 */
6543 SSMR3PutU8(pSSM, 0);
6544#endif
6545#endif /* E1K_WITH_TXD_CACHE */
6546/**@todo GSO requires some more state here. */
6547 E1kLog(("%s State has been saved\n", INSTANCE(pState)));
6548 return VINF_SUCCESS;
6549}
6550
6551#if 0
6552/**
6553 * Cleanup after saving.
6554 *
6555 * @returns VBox status code.
6556 * @param pDevIns The device instance.
6557 * @param pSSM The handle to the saved state.
6558 */
6559static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6560{
6561 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6562
6563 /* If VM is being powered off unlocking will result in assertions in PGM */
6564 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6565 pState->fLocked = false;
6566 else
6567 E1kLog(("%s VM is not running -- remain locked\n", INSTANCE(pState)));
6568 E1kLog(("%s Unlocked\n", INSTANCE(pState)));
6569 return VINF_SUCCESS;
6570}
6571#endif
6572
6573/**
6574 * Prepares for state loading.
6575 *
6576 * @returns VBox status code.
6577 * @param pDevIns The device instance.
6578 * @param pSSM The handle to the saved state.
6579 */
6580static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6581{
6582 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6583
6584 int rc = e1kCsEnter(pState, VERR_SEM_BUSY);
6585 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6586 return rc;
6587 e1kCsLeave(pState);
6588 return VINF_SUCCESS;
6589}
6590
6591/**
6592 * Restores the previously saved state of the device.
6593 *
6594 * @returns VBox status code.
6595 * @param pDevIns The device instance.
6596 * @param pSSM The handle to the saved state.
6597 * @param uVersion The data unit version number.
6598 * @param uPass The data pass.
6599 */
6600static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6601{
6602 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6603 int rc;
6604
6605 if ( uVersion != E1K_SAVEDSTATE_VERSION
6606#ifdef E1K_WITH_TXD_CACHE
6607 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
6608#endif /* E1K_WITH_TXD_CACHE */
6609 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
6610 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
6611 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
6612
6613 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
6614 || uPass != SSM_PASS_FINAL)
6615 {
6616 /* config checks */
6617 RTMAC macConfigured;
6618 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
6619 AssertRCReturn(rc, rc);
6620 if ( memcmp(&macConfigured, &pState->macConfigured, sizeof(macConfigured))
6621 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
6622 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", INSTANCE(pState), &pState->macConfigured, &macConfigured));
6623
6624 E1KCHIP eChip;
6625 rc = SSMR3GetU32(pSSM, &eChip);
6626 AssertRCReturn(rc, rc);
6627 if (eChip != pState->eChip)
6628 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pState->eChip, eChip);
6629 }
6630
6631 if (uPass == SSM_PASS_FINAL)
6632 {
6633 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
6634 {
6635 rc = pState->eeprom.load(pSSM);
6636 AssertRCReturn(rc, rc);
6637 }
6638 /* the state */
6639 SSMR3GetMem(pSSM, &pState->auRegs, sizeof(pState->auRegs));
6640 SSMR3GetBool(pSSM, &pState->fIntRaised);
6641 /** @todo: PHY could be made a separate device with its own versioning */
6642 Phy::loadState(pSSM, &pState->phy);
6643 SSMR3GetU32(pSSM, &pState->uSelectedReg);
6644 SSMR3GetMem(pSSM, &pState->auMTA, sizeof(pState->auMTA));
6645 SSMR3GetMem(pSSM, &pState->aRecAddr, sizeof(pState->aRecAddr));
6646 SSMR3GetMem(pSSM, &pState->auVFTA, sizeof(pState->auVFTA));
6647 SSMR3GetU64(pSSM, &pState->u64AckedAt);
6648 SSMR3GetU16(pSSM, &pState->u16RxBSize);
6649 //SSMR3GetBool(pSSM, pState->fDelayInts);
6650 //SSMR3GetBool(pSSM, pState->fIntMaskUsed);
6651 SSMR3GetU16(pSSM, &pState->u16TxPktLen);
6652 SSMR3GetMem(pSSM, &pState->aTxPacketFallback[0], pState->u16TxPktLen);
6653 SSMR3GetBool(pSSM, &pState->fIPcsum);
6654 SSMR3GetBool(pSSM, &pState->fTCPcsum);
6655 SSMR3GetMem(pSSM, &pState->contextTSE, sizeof(pState->contextTSE));
6656 rc = SSMR3GetMem(pSSM, &pState->contextNormal, sizeof(pState->contextNormal));
6657 AssertRCReturn(rc, rc);
6658 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
6659 {
6660 SSMR3GetBool(pSSM, &pState->fVTag);
6661 rc = SSMR3GetU16(pSSM, &pState->u16VTagTCI);
6662 AssertRCReturn(rc, rc);
6663 }
6664 else
6665 {
6666 pState->fVTag = false;
6667 pState->u16VTagTCI = 0;
6668 }
6669#ifdef E1K_WITH_TXD_CACHE
6670 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
6671 {
6672 rc = SSMR3GetU8(pSSM, &pState->nTxDFetched);
6673 AssertRCReturn(rc, rc);
6674 if (pState->nTxDFetched)
6675 SSMR3GetMem(pSSM, pState->aTxDescriptors,
6676 pState->nTxDFetched * sizeof(pState->aTxDescriptors[0]));
6677 }
6678 else
6679 pState->nTxDFetched = 0;
6680 /*
6681 * @todo: Perhaps we should not store the TXD cache as the entries can be
6682 * simply fetched again from the guest's memory. Or can't they?
6683 */
6684#endif /* E1K_WITH_TXD_CACHE */
6685#ifdef E1K_WITH_RXD_CACHE
6686 /*
6687 * There is no point in storing the RX descriptor cache in the saved
6688 * state, we just need to make sure it is empty.
6689 */
6690 pState->iRxDCurrent = pState->nRxDFetched = 0;
6691#endif /* E1K_WITH_RXD_CACHE */
6692 /* derived state */
6693 e1kSetupGsoCtx(&pState->GsoCtx, &pState->contextTSE);
6694
6695 E1kLog(("%s State has been restored\n", INSTANCE(pState)));
6696 e1kDumpState(pState);
6697 }
6698 return VINF_SUCCESS;
6699}
6700
6701/**
6702 * Link status adjustments after loading.
6703 *
6704 * @returns VBox status code.
6705 * @param pDevIns The device instance.
6706 * @param pSSM The handle to the saved state.
6707 */
6708static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6709{
6710 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6711
6712 /* Update promiscuous mode */
6713 if (pState->pDrvR3)
6714 pState->pDrvR3->pfnSetPromiscuousMode(pState->pDrvR3,
6715 !!(RCTL & (RCTL_UPE | RCTL_MPE)));
6716
6717 /*
6718 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
6719 * passed to us. We go through all this stuff if the link was up and we
6720 * weren't teleported.
6721 */
6722 if ( (STATUS & STATUS_LU)
6723 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
6724 && pState->cMsLinkUpDelay)
6725 {
6726 E1kLog(("%s Link is down temporarily\n", INSTANCE(pState)));
6727 STATUS &= ~STATUS_LU;
6728 Phy::setLinkStatus(&pState->phy, false);
6729 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
6730 /* Restore the link back in five seconds (default). */
6731 e1kBringLinkUpDelayed(pState);
6732 }
6733 return VINF_SUCCESS;
6734}
6735
6736
6737/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
6738
6739/**
6740 * Detach notification.
6741 *
6742 * One port on the network card has been disconnected from the network.
6743 *
6744 * @param pDevIns The device instance.
6745 * @param iLUN The logical unit which is being detached.
6746 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
6747 */
6748static DECLCALLBACK(void) e1kDetach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
6749{
6750 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6751 Log(("%s e1kDetach:\n", INSTANCE(pState)));
6752
6753 AssertLogRelReturnVoid(iLUN == 0);
6754
6755 PDMCritSectEnter(&pState->cs, VERR_SEM_BUSY);
6756
6757 /** @todo: r=pritesh still need to check if I missed
6758 * cleaning something up in this function
6759 */
6760
6761 /*
6762 * Zero some important members.
6763 */
6764 pState->pDrvBase = NULL;
6765 pState->pDrvR3 = NULL;
6766 pState->pDrvR0 = NIL_RTR0PTR;
6767 pState->pDrvRC = NIL_RTRCPTR;
6768
6769 PDMCritSectLeave(&pState->cs);
6770}
6771
6772/**
6773 * Attach notification.
6774 *
6775 * One port on the network card has been connected to a network.
6776 *
6777 * @returns VBox status code.
6778 * @param pDevIns The device instance.
6779 * @param iLUN The logical unit which is being attached.
6780 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
6781 *
6782 * @remarks This code path is not used during construction.
6783 */
6784static DECLCALLBACK(int) e1kAttach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
6785{
6786 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6787 LogFlow(("%s e1kAttach:\n", INSTANCE(pState)));
6788
6789 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
6790
6791 PDMCritSectEnter(&pState->cs, VERR_SEM_BUSY);
6792
6793 /*
6794 * Attach the driver.
6795 */
6796 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pState->IBase, &pState->pDrvBase, "Network Port");
6797 if (RT_SUCCESS(rc))
6798 {
6799 if (rc == VINF_NAT_DNS)
6800 {
6801#ifdef RT_OS_LINUX
6802 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
6803 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
6804#else
6805 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
6806 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
6807#endif
6808 }
6809 pState->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMINETWORKUP);
6810 AssertMsgStmt(pState->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
6811 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
6812 if (RT_SUCCESS(rc))
6813 {
6814 PPDMIBASER0 pBaseR0 = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASER0);
6815 pState->pDrvR0 = pBaseR0 ? pBaseR0->pfnQueryInterface(pBaseR0, PDMINETWORKUP_IID) : NIL_RTR0PTR;
6816
6817 PPDMIBASERC pBaseRC = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASERC);
6818 pState->pDrvRC = pBaseRC ? pBaseRC->pfnQueryInterface(pBaseRC, PDMINETWORKUP_IID) : NIL_RTR0PTR;
6819 }
6820 }
6821 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
6822 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
6823 {
6824 /* This should never happen because this function is not called
6825 * if there is no driver to attach! */
6826 Log(("%s No attached driver!\n", INSTANCE(pState)));
6827 }
6828
6829 /*
6830 * Temporarily set the link down if it was up so that the guest
6831 * will know that we have changed the configuration of the
6832 * network card.
6833 */
6834 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
6835 {
6836 STATUS &= ~STATUS_LU;
6837 Phy::setLinkStatus(&pState->phy, false);
6838 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
6839 /* Restore the link back in 5 seconds (default). */
6840 e1kBringLinkUpDelayed(pState);
6841 }
6842
6843 PDMCritSectLeave(&pState->cs);
6844 return rc;
6845
6846}
6847
6848/**
6849 * @copydoc FNPDMDEVPOWEROFF
6850 */
6851static DECLCALLBACK(void) e1kPowerOff(PPDMDEVINS pDevIns)
6852{
6853 /* Poke thread waiting for buffer space. */
6854 e1kWakeupReceive(pDevIns);
6855}
6856
6857/**
6858 * @copydoc FNPDMDEVRESET
6859 */
6860static DECLCALLBACK(void) e1kReset(PPDMDEVINS pDevIns)
6861{
6862 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6863#ifdef E1K_TX_DELAY
6864 e1kCancelTimer(pState, pState->CTX_SUFF(pTXDTimer));
6865#endif /* E1K_TX_DELAY */
6866 e1kCancelTimer(pState, pState->CTX_SUFF(pIntTimer));
6867 e1kCancelTimer(pState, pState->CTX_SUFF(pLUTimer));
6868 e1kXmitFreeBuf(pState);
6869 pState->u16TxPktLen = 0;
6870 pState->fIPcsum = false;
6871 pState->fTCPcsum = false;
6872 pState->fIntMaskUsed = false;
6873 pState->fDelayInts = false;
6874 pState->fLocked = false;
6875 pState->u64AckedAt = 0;
6876 e1kHardReset(pState);
6877}
6878
6879/**
6880 * @copydoc FNPDMDEVSUSPEND
6881 */
6882static DECLCALLBACK(void) e1kSuspend(PPDMDEVINS pDevIns)
6883{
6884 /* Poke thread waiting for buffer space. */
6885 e1kWakeupReceive(pDevIns);
6886}
6887
6888/**
6889 * Device relocation callback.
6890 *
6891 * When this callback is called, the device instance data, and if the
6892 * device has a GC component, is being relocated, and/or the selectors
6893 * have been changed. The device must use the chance to perform the
6894 * necessary pointer relocations and data updates.
6895 *
6896 * Before the GC code is executed the first time, this function will be
6897 * called with a 0 delta so GC pointer calculations can be done in one place.
6898 *
6899 * @param pDevIns Pointer to the device instance.
6900 * @param offDelta The relocation delta relative to the old location.
6901 *
6902 * @remark A relocation CANNOT fail.
6903 */
6904static DECLCALLBACK(void) e1kRelocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
6905{
6906 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6907 pState->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
6908 pState->pTxQueueRC = PDMQueueRCPtr(pState->pTxQueueR3);
6909 pState->pCanRxQueueRC = PDMQueueRCPtr(pState->pCanRxQueueR3);
6910#ifdef E1K_USE_RX_TIMERS
6911 pState->pRIDTimerRC = TMTimerRCPtr(pState->pRIDTimerR3);
6912 pState->pRADTimerRC = TMTimerRCPtr(pState->pRADTimerR3);
6913#endif /* E1K_USE_RX_TIMERS */
6914#ifdef E1K_USE_TX_TIMERS
6915 pState->pTIDTimerRC = TMTimerRCPtr(pState->pTIDTimerR3);
6916# ifndef E1K_NO_TAD
6917 pState->pTADTimerRC = TMTimerRCPtr(pState->pTADTimerR3);
6918# endif /* E1K_NO_TAD */
6919#endif /* E1K_USE_TX_TIMERS */
6920#ifdef E1K_TX_DELAY
6921 pState->pTXDTimerRC = TMTimerRCPtr(pState->pTXDTimerR3);
6922#endif /* E1K_TX_DELAY */
6923 pState->pIntTimerRC = TMTimerRCPtr(pState->pIntTimerR3);
6924 pState->pLUTimerRC = TMTimerRCPtr(pState->pLUTimerR3);
6925}
6926
6927/**
6928 * Destruct a device instance.
6929 *
6930 * We need to free non-VM resources only.
6931 *
6932 * @returns VBox status.
6933 * @param pDevIns The device instance data.
6934 * @thread EMT
6935 */
6936static DECLCALLBACK(int) e1kDestruct(PPDMDEVINS pDevIns)
6937{
6938 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6939 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
6940
6941 e1kDumpState(pState);
6942 E1kLog(("%s Destroying instance\n", INSTANCE(pState)));
6943 if (PDMCritSectIsInitialized(&pState->cs))
6944 {
6945 if (pState->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
6946 {
6947 RTSemEventSignal(pState->hEventMoreRxDescAvail);
6948 RTSemEventDestroy(pState->hEventMoreRxDescAvail);
6949 pState->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
6950 }
6951#ifdef E1K_WITH_TX_CS
6952 PDMR3CritSectDelete(&pState->csTx);
6953#endif /* E1K_WITH_TX_CS */
6954 PDMR3CritSectDelete(&pState->csRx);
6955 PDMR3CritSectDelete(&pState->cs);
6956 }
6957 return VINF_SUCCESS;
6958}
6959
6960/**
6961 * @copydoc FNRTSTRFORMATTYPE
6962 */
6963static DECLCALLBACK(size_t) e1kFmtRxDesc(PFNRTSTROUTPUT pfnOutput,
6964 void *pvArgOutput,
6965 const char *pszType,
6966 void const *pvValue,
6967 int cchWidth,
6968 int cchPrecision,
6969 unsigned fFlags,
6970 void *pvUser)
6971{
6972 AssertReturn(strcmp(pszType, "e1krxd") == 0, 0);
6973 E1KRXDESC* pDesc = (E1KRXDESC*)pvValue;
6974 if (!pDesc)
6975 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_RXD");
6976
6977 size_t cbPrintf = 0;
6978 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Address=%16LX Length=%04X Csum=%04X\n",
6979 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
6980 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x",
6981 pDesc->status.fPIF ? "PIF" : "pif",
6982 pDesc->status.fIPCS ? "IPCS" : "ipcs",
6983 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
6984 pDesc->status.fVP ? "VP" : "vp",
6985 pDesc->status.fIXSM ? "IXSM" : "ixsm",
6986 pDesc->status.fEOP ? "EOP" : "eop",
6987 pDesc->status.fDD ? "DD" : "dd",
6988 pDesc->status.fRXE ? "RXE" : "rxe",
6989 pDesc->status.fIPE ? "IPE" : "ipe",
6990 pDesc->status.fTCPE ? "TCPE" : "tcpe",
6991 pDesc->status.fCE ? "CE" : "ce",
6992 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
6993 E1K_SPEC_VLAN(pDesc->status.u16Special),
6994 E1K_SPEC_PRI(pDesc->status.u16Special));
6995 return cbPrintf;
6996}
6997
6998/**
6999 * @copydoc FNRTSTRFORMATTYPE
7000 */
7001static DECLCALLBACK(size_t) e1kFmtTxDesc(PFNRTSTROUTPUT pfnOutput,
7002 void *pvArgOutput,
7003 const char *pszType,
7004 void const *pvValue,
7005 int cchWidth,
7006 int cchPrecision,
7007 unsigned fFlags,
7008 void *pvUser)
7009{
7010 AssertReturn(strcmp(pszType, "e1ktxd") == 0, 0);
7011 E1KTXDESC* pDesc = (E1KTXDESC*)pvValue;
7012 if (!pDesc)
7013 return RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "NULL_TXD");
7014
7015 size_t cbPrintf = 0;
7016 switch (e1kGetDescType(pDesc))
7017 {
7018 case E1K_DTYP_CONTEXT:
7019 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Context\n"
7020 " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n"
7021 " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s",
7022 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
7023 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE,
7024 pDesc->context.dw2.fIDE ? " IDE":"",
7025 pDesc->context.dw2.fRS ? " RS" :"",
7026 pDesc->context.dw2.fTSE ? " TSE":"",
7027 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
7028 pDesc->context.dw2.fTCP ? "TCP":"UDP",
7029 pDesc->context.dw2.u20PAYLEN,
7030 pDesc->context.dw3.u8HDRLEN,
7031 pDesc->context.dw3.u16MSS,
7032 pDesc->context.dw3.fDD?"DD":"");
7033 break;
7034 case E1K_DTYP_DATA:
7035 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Data Address=%16LX DTALEN=%05X\n"
7036 " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x",
7037 pDesc->data.u64BufAddr,
7038 pDesc->data.cmd.u20DTALEN,
7039 pDesc->data.cmd.fIDE ? " IDE" :"",
7040 pDesc->data.cmd.fVLE ? " VLE" :"",
7041 pDesc->data.cmd.fRPS ? " RPS" :"",
7042 pDesc->data.cmd.fRS ? " RS" :"",
7043 pDesc->data.cmd.fTSE ? " TSE" :"",
7044 pDesc->data.cmd.fIFCS? " IFCS":"",
7045 pDesc->data.cmd.fEOP ? " EOP" :"",
7046 pDesc->data.dw3.fDD ? " DD" :"",
7047 pDesc->data.dw3.fEC ? " EC" :"",
7048 pDesc->data.dw3.fLC ? " LC" :"",
7049 pDesc->data.dw3.fTXSM? " TXSM":"",
7050 pDesc->data.dw3.fIXSM? " IXSM":"",
7051 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
7052 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
7053 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
7054 break;
7055 case E1K_DTYP_LEGACY:
7056 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Type=Legacy Address=%16LX DTALEN=%05X\n"
7057 " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x",
7058 pDesc->data.u64BufAddr,
7059 pDesc->legacy.cmd.u16Length,
7060 pDesc->legacy.cmd.fIDE ? " IDE" :"",
7061 pDesc->legacy.cmd.fVLE ? " VLE" :"",
7062 pDesc->legacy.cmd.fRPS ? " RPS" :"",
7063 pDesc->legacy.cmd.fRS ? " RS" :"",
7064 pDesc->legacy.cmd.fIC ? " IC" :"",
7065 pDesc->legacy.cmd.fIFCS? " IFCS":"",
7066 pDesc->legacy.cmd.fEOP ? " EOP" :"",
7067 pDesc->legacy.dw3.fDD ? " DD" :"",
7068 pDesc->legacy.dw3.fEC ? " EC" :"",
7069 pDesc->legacy.dw3.fLC ? " LC" :"",
7070 pDesc->legacy.cmd.u8CSO,
7071 pDesc->legacy.dw3.u8CSS,
7072 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
7073 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
7074 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
7075 break;
7076 default:
7077 cbPrintf += RTStrFormat(pfnOutput, pvArgOutput, NULL, 0, "Invalid Transmit Descriptor");
7078 break;
7079 }
7080
7081 return cbPrintf;
7082}
7083
7084/**
7085 * Status info callback.
7086 *
7087 * @param pDevIns The device instance.
7088 * @param pHlp The output helpers.
7089 * @param pszArgs The arguments.
7090 */
7091static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
7092{
7093 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7094 unsigned i;
7095 // bool fRcvRing = false;
7096 // bool fXmtRing = false;
7097
7098 /*
7099 * Parse args.
7100 if (pszArgs)
7101 {
7102 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
7103 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
7104 }
7105 */
7106
7107 /*
7108 * Show info.
7109 */
7110 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
7111 pDevIns->iInstance, pState->addrIOPort, pState->addrMMReg,
7112 &pState->macConfigured, g_Chips[pState->eChip].pcszName,
7113 pState->fGCEnabled ? " GC" : "", pState->fR0Enabled ? " R0" : "");
7114
7115 e1kCsEnter(pState, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
7116
7117 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
7118 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", s_e1kRegMap[i].abbrev, pState->auRegs[i]);
7119
7120 for (i = 0; i < RT_ELEMENTS(pState->aRecAddr.array); i++)
7121 {
7122 E1KRAELEM* ra = pState->aRecAddr.array + i;
7123 if (ra->ctl & RA_CTL_AV)
7124 {
7125 const char *pcszTmp;
7126 switch (ra->ctl & RA_CTL_AS)
7127 {
7128 case 0: pcszTmp = "DST"; break;
7129 case 1: pcszTmp = "SRC"; break;
7130 default: pcszTmp = "reserved";
7131 }
7132 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
7133 }
7134 }
7135 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
7136 uint32_t rdh = RDH;
7137 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
7138 for (i = 0; i < cDescs; ++i)
7139 {
7140 E1KRXDESC desc;
7141 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
7142 &desc, sizeof(desc));
7143 if (i == rdh)
7144 pHlp->pfnPrintf(pHlp, ">>> ");
7145 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n", e1kDescAddr(RDBAH, RDBAL, i), &desc);
7146 }
7147 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors in Cache (at %d (RDH %d)/ fetched %d / max %d) --\n",
7148 pState->iRxDCurrent, RDH, pState->nRxDFetched, E1K_RXD_CACHE_SIZE);
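    /* Map cache entry 0 back to its ring index: RDH corresponds to the entry at
       iRxDCurrent, so step it back by iRxDCurrent, wrapping around the ring. */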
7149 if (rdh > pState->iRxDCurrent)
7150 rdh -= pState->iRxDCurrent;
7151 else
7152 rdh = cDescs + rdh - pState->iRxDCurrent;
7153 for (i = 0; i < pState->nRxDFetched; ++i)
7154 {
7155 if (i == pState->iRxDCurrent)
7156 pHlp->pfnPrintf(pHlp, ">>> ");
7157 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1krxd]\n",
7158 e1kDescAddr(RDBAH, RDBAL, rdh++ % cDescs),
7159 &pState->aRxDescriptors[i]);
7160 }
7161
7162 cDescs = TDLEN / sizeof(E1KTXDESC);
7163 uint32_t tdh = TDH;
7164 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
7165 for (i = 0; i < cDescs; ++i)
7166 {
7167 E1KTXDESC desc;
7168 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
7169 &desc, sizeof(desc));
7170 if (i == tdh)
7171 pHlp->pfnPrintf(pHlp, ">>> ");
7172 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n", e1kDescAddr(TDBAH, TDBAL, i), &desc);
7173 }
7174 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors in Cache (at %d (TDH %d)/ fetched %d / max %d) --\n",
7175 pState->iTxDCurrent, TDH, pState->nTxDFetched, E1K_TXD_CACHE_SIZE);
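    /* Same mapping for the TX descriptor cache: TDH corresponds to the cache entry at iTxDCurrent. */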
7176 if (tdh > pState->iTxDCurrent)
7177 tdh -= pState->iTxDCurrent;
7178 else
7179 tdh = cDescs + tdh - pState->iTxDCurrent;
7180 for (i = 0; i < pState->nTxDFetched; ++i)
7181 {
7182 if (i == pState->iTxDCurrent)
7183 pHlp->pfnPrintf(pHlp, ">>> ");
7184 pHlp->pfnPrintf(pHlp, "%RGp: %R[e1ktxd]\n",
7185 e1kDescAddr(TDBAH, TDBAL, tdh++ % cDescs),
7186 &pState->aTxDescriptors[i]);
7187 }
7188
7189
7190#ifdef E1K_INT_STATS
7191 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pState->uStatIntTry);
7192 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pState->uStatInt);
7193 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pState->uStatIntLower);
7194 pHlp->pfnPrintf(pHlp, "Interrupts delayed: %d\n", pState->uStatIntDly);
7195 pHlp->pfnPrintf(pHlp, "Disabled delayed: %d\n", pState->uStatDisDly);
7196 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pState->uStatIntSkip);
7197 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pState->uStatIntMasked);
7198 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pState->uStatIntEarly);
7199 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pState->uStatIntLate);
7200 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pState->iStatIntLost);
7201 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pState->uStatIntRx);
7202 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pState->uStatIntTx);
7203 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pState->uStatIntICS);
7204 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pState->uStatIntRDTR);
7205 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pState->uStatIntRXDMT0);
7206 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pState->uStatIntTXQE);
7207 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pState->uStatTxIDE);
7208 pHlp->pfnPrintf(pHlp, "TX delayed: %d\n", pState->uStatTxDelayed);
7209 pHlp->pfnPrintf(pHlp, "TX delayed expired: %d\n", pState->uStatTxDelayExp);
7210 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pState->uStatTxNoRS);
7211 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pState->uStatTAD);
7212 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pState->uStatTID);
7213 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pState->uStatRAD);
7214 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pState->uStatRID);
7215 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pState->uStatDescCtx);
7216 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pState->uStatDescDat);
7217 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pState->uStatDescLeg);
7218 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pState->uStatRxFrm);
7219 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pState->uStatTxFrm);
7220 pHlp->pfnPrintf(pHlp, "TX frames up to 1514: %d\n", pState->uStatTx1514);
7221 pHlp->pfnPrintf(pHlp, "TX frames up to 2962: %d\n", pState->uStatTx2962);
7222 pHlp->pfnPrintf(pHlp, "TX frames up to 4410: %d\n", pState->uStatTx4410);
7223 pHlp->pfnPrintf(pHlp, "TX frames up to 5858: %d\n", pState->uStatTx5858);
7224 pHlp->pfnPrintf(pHlp, "TX frames up to 7306: %d\n", pState->uStatTx7306);
7225 pHlp->pfnPrintf(pHlp, "TX frames up to 8754: %d\n", pState->uStatTx8754);
7226 pHlp->pfnPrintf(pHlp, "TX frames up to 16384: %d\n", pState->uStatTx16384);
7227 pHlp->pfnPrintf(pHlp, "TX frames up to 32768: %d\n", pState->uStatTx32768);
7228 pHlp->pfnPrintf(pHlp, "Larger TX frames : %d\n", pState->uStatTxLarge);
7229#endif /* E1K_INT_STATS */
7230
7231 e1kCsLeave(pState);
7232}
7233
7234/**
7235 * Sets 8-bit register in PCI configuration space.
7236 * @param refPciDev The PCI device.
7237 * @param uOffset The register offset.
7238 * @param u8Value The value to store in the register.
7239 * @thread EMT
7240 */
7241DECLINLINE(void) e1kPCICfgSetU8(PCIDEVICE& refPciDev, uint32_t uOffset, uint8_t u8Value)
7242{
7243 Assert(uOffset < sizeof(refPciDev.config));
7244 refPciDev.config[uOffset] = u8Value;
7245}
7246
7247/**
7248 * Sets 16-bit register in PCI configuration space.
7249 * @param refPciDev The PCI device.
7250 * @param uOffset The register offset.
7251 * @param u16Value The value to store in the register.
7252 * @thread EMT
7253 */
7254DECLINLINE(void) e1kPCICfgSetU16(PCIDEVICE& refPciDev, uint32_t uOffset, uint16_t u16Value)
7255{
7256 Assert(uOffset+sizeof(u16Value) <= sizeof(refPciDev.config));
7257 *(uint16_t*)&refPciDev.config[uOffset] = u16Value;
7258}
7259
7260/**
7261 * Sets 32-bit register in PCI configuration space.
7262 * @param refPciDev The PCI device.
7263 * @param uOffset The register offset.
7264 * @param u32Value The value to store in the register.
7265 * @thread EMT
7266 */
7267DECLINLINE(void) e1kPCICfgSetU32(PCIDEVICE& refPciDev, uint32_t uOffset, uint32_t u32Value)
7268{
7269 Assert(uOffset+sizeof(u32Value) <= sizeof(refPciDev.config));
7270 *(uint32_t*)&refPciDev.config[uOffset] = u32Value;
7271}
7272
7273/**
7274 * Set PCI configuration space registers.
7275 *
7276 * @param pci Reference to the PCI device structure.
 * @param eChip The chip type.
7277 * @thread EMT
7278 */
7279static DECLCALLBACK(void) e1kConfigurePCI(PCIDEVICE& pci, E1KCHIP eChip)
7280{
7281 Assert(eChip < RT_ELEMENTS(g_Chips));
7282 /* Configure PCI Device, assume 32-bit mode ******************************/
7283 PCIDevSetVendorId(&pci, g_Chips[eChip].uPCIVendorId);
7284 PCIDevSetDeviceId(&pci, g_Chips[eChip].uPCIDeviceId);
7285 e1kPCICfgSetU16(pci, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_Chips[eChip].uPCISubsystemVendorId);
7286 e1kPCICfgSetU16(pci, VBOX_PCI_SUBSYSTEM_ID, g_Chips[eChip].uPCISubsystemId);
7287
7288 e1kPCICfgSetU16(pci, VBOX_PCI_COMMAND, 0x0000);
7289 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7290 e1kPCICfgSetU16(pci, VBOX_PCI_STATUS,
7291 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7292 /* Stepping A2 */
7293 e1kPCICfgSetU8( pci, VBOX_PCI_REVISION_ID, 0x02);
7294 /* Ethernet adapter */
7295 e1kPCICfgSetU8( pci, VBOX_PCI_CLASS_PROG, 0x00);
7296 e1kPCICfgSetU16(pci, VBOX_PCI_CLASS_DEVICE, 0x0200);
7297 /* normal single function Ethernet controller */
7298 e1kPCICfgSetU8( pci, VBOX_PCI_HEADER_TYPE, 0x00);
7299 /* Memory Register Base Address */
7300 e1kPCICfgSetU32(pci, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7301 /* Memory Flash Base Address */
7302 e1kPCICfgSetU32(pci, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7303 /* IO Register Base Address */
7304 e1kPCICfgSetU32(pci, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7305 /* Expansion ROM Base Address */
7306 e1kPCICfgSetU32(pci, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7307 /* Capabilities Pointer */
7308 e1kPCICfgSetU8( pci, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7309 /* Interrupt Pin: INTA# */
7310 e1kPCICfgSetU8( pci, VBOX_PCI_INTERRUPT_PIN, 0x01);
7311 /* Max_Lat/Min_Gnt: very high priority and time slice */
7312 e1kPCICfgSetU8( pci, VBOX_PCI_MIN_GNT, 0xFF);
7313 e1kPCICfgSetU8( pci, VBOX_PCI_MAX_LAT, 0x00);
7314
7315 /* PCI Power Management Registers ****************************************/
7316 /* Capability ID: PCI Power Management Registers */
7317 e1kPCICfgSetU8( pci, 0xDC, VBOX_PCI_CAP_ID_PM);
7318 /* Next Item Pointer: PCI-X */
7319 e1kPCICfgSetU8( pci, 0xDC + 1, 0xE4);
7320 /* Power Management Capabilities: PM disabled, DSI */
7321 e1kPCICfgSetU16(pci, 0xDC + 2,
7322 0x0002 | VBOX_PCI_PM_CAP_DSI);
7323 /* Power Management Control / Status Register: PM disabled */
7324 e1kPCICfgSetU16(pci, 0xDC + 4, 0x0000);
7325 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7326 e1kPCICfgSetU8( pci, 0xDC + 6, 0x00);
7327 /* Data Register: PM disabled, always 0 */
7328 e1kPCICfgSetU8( pci, 0xDC + 7, 0x00);
7329
7330 /* PCI-X Configuration Registers *****************************************/
7331 /* Capability ID: PCI-X Configuration Registers */
7332 e1kPCICfgSetU8( pci, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7333#ifdef E1K_WITH_MSI
7334 e1kPCICfgSetU8( pci, 0xE4 + 1, 0x80);
7335#else
7336 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7337 e1kPCICfgSetU8( pci, 0xE4 + 1, 0x00);
7338#endif
7339 /* PCI-X Command: Enable Relaxed Ordering */
7340 e1kPCICfgSetU16(pci, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7341 /* PCI-X Status: 32-bit, 66MHz*/
7342 /// @todo: is this value really correct? fff8 doesn't look like actual PCI address
7343 e1kPCICfgSetU32(pci, 0xE4 + 4, 0x0040FFF8);
7344}
7345
7346static int e1kInitDebugHelpers()
7347{
7348 int rc = VINF_SUCCESS;
7349 static bool g_fHelpersRegistered = false;
7350 if (g_fHelpersRegistered)
7351 return rc;
7352 g_fHelpersRegistered = true;
7353 rc = RTStrFormatTypeRegister("e1krxd", e1kFmtRxDesc, NULL);
7354 AssertRCReturn(rc, rc);
7355 rc = RTStrFormatTypeRegister("e1ktxd", e1kFmtTxDesc, NULL);
7356 AssertRC(rc);
7357 return rc;
7358}
7359
7360/**
7361 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7362 */
7363static DECLCALLBACK(int) e1kConstruct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7364{
7365 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7366 int rc;
7367 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7368
7369 /* Init handles and log related stuff. */
7370 RTStrPrintf(pState->szInstance, sizeof(pState->szInstance), "E1000#%d", iInstance);
7371 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", INSTANCE(pState), sizeof(E1KRXDESC)));
7372 pState->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7373
7374 /*
7375 * Validate configuration.
7376 */
7377 if (!CFGMR3AreValuesValid(pCfg, "MAC\0" "CableConnected\0" "AdapterType\0"
7378 "LineSpeed\0" "GCEnabled\0" "R0Enabled\0"
7379 "EthernetCRC\0" "GSOEnabled\0" "LinkUpDelay\0"))
7380 return PDMDEV_SET_ERROR(pDevIns, VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES,
7381 N_("Invalid configuration for E1000 device"));
7382
7383 /** @todo: LineSpeed unused! */
7384
7385 pState->fR0Enabled = true;
7386 pState->fGCEnabled = true;
7387 pState->fEthernetCRC = true;
7388 pState->fGSOEnabled = true;
7389
7390 /* Get config params */
7391 rc = CFGMR3QueryBytes(pCfg, "MAC", pState->macConfigured.au8,
7392 sizeof(pState->macConfigured.au8));
7393 if (RT_FAILURE(rc))
7394 return PDMDEV_SET_ERROR(pDevIns, rc,
7395 N_("Configuration error: Failed to get MAC address"));
7396 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pState->fCableConnected);
7397 if (RT_FAILURE(rc))
7398 return PDMDEV_SET_ERROR(pDevIns, rc,
7399 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7400 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pState->eChip);
7401 if (RT_FAILURE(rc))
7402 return PDMDEV_SET_ERROR(pDevIns, rc,
7403 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7404 Assert(pState->eChip <= E1K_CHIP_82545EM);
7405 rc = CFGMR3QueryBoolDef(pCfg, "GCEnabled", &pState->fGCEnabled, true);
7406 if (RT_FAILURE(rc))
7407 return PDMDEV_SET_ERROR(pDevIns, rc,
7408 N_("Configuration error: Failed to get the value of 'GCEnabled'"));
7409
7410 rc = CFGMR3QueryBoolDef(pCfg, "R0Enabled", &pState->fR0Enabled, true);
7411 if (RT_FAILURE(rc))
7412 return PDMDEV_SET_ERROR(pDevIns, rc,
7413 N_("Configuration error: Failed to get the value of 'R0Enabled'"));
7414
7415 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pState->fEthernetCRC, true);
7416 if (RT_FAILURE(rc))
7417 return PDMDEV_SET_ERROR(pDevIns, rc,
7418 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7419
7420 rc = CFGMR3QueryBoolDef(pCfg, "GSOEnabled", &pState->fGSOEnabled, true);
7421 if (RT_FAILURE(rc))
7422 return PDMDEV_SET_ERROR(pDevIns, rc,
7423 N_("Configuration error: Failed to get the value of 'GSOEnabled'"));
7424
7425 rc = CFGMR3QueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pState->cMsLinkUpDelay, 5000); /* ms */
7426 if (RT_FAILURE(rc))
7427 return PDMDEV_SET_ERROR(pDevIns, rc,
7428 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7429 Assert(pState->cMsLinkUpDelay <= 300000); /* at most 5 minutes */
7430 if (pState->cMsLinkUpDelay > 5000)
7431 {
7432 LogRel(("%s WARNING! Link up delay is set to %u seconds!\n",
7433 INSTANCE(pState), pState->cMsLinkUpDelay / 1000));
7434 }
7435 else if (pState->cMsLinkUpDelay == 0)
7436 {
7437 LogRel(("%s WARNING! Link up delay is disabled!\n", INSTANCE(pState)));
7438 }
7439
7440 E1kLog(("%s Chip=%s LinkUpDelay=%ums EthernetCRC=%s GSO=%s\n", INSTANCE(pState),
7441 g_Chips[pState->eChip].pcszName, pState->cMsLinkUpDelay,
7442 pState->fEthernetCRC ? "on" : "off",
7443 pState->fGSOEnabled ? "enabled" : "disabled"));
7444
7445 /* Initialize state structure */
7446 pState->pDevInsR3 = pDevIns;
7447 pState->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
7448 pState->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7449 pState->u16TxPktLen = 0;
7450 pState->fIPcsum = false;
7451 pState->fTCPcsum = false;
7452 pState->fIntMaskUsed = false;
7453 pState->fDelayInts = false;
7454 pState->fLocked = false;
7455 pState->u64AckedAt = 0;
7456 pState->led.u32Magic = PDMLED_MAGIC;
7457 pState->u32PktNo = 1;
7458
7459#ifdef E1K_INT_STATS
7460 pState->uStatInt = 0;
7461 pState->uStatIntTry = 0;
7462 pState->uStatIntLower = 0;
7463 pState->uStatIntDly = 0;
7464 pState->uStatDisDly = 0;
7465 pState->iStatIntLost = 0;
7466 pState->iStatIntLostOne = 0;
7467 pState->uStatIntLate = 0;
7468 pState->uStatIntMasked = 0;
7469 pState->uStatIntEarly = 0;
7470 pState->uStatIntRx = 0;
7471 pState->uStatIntTx = 0;
7472 pState->uStatIntICS = 0;
7473 pState->uStatIntRDTR = 0;
7474 pState->uStatIntRXDMT0 = 0;
7475 pState->uStatIntTXQE = 0;
7476 pState->uStatTxNoRS = 0;
7477 pState->uStatTxIDE = 0;
7478 pState->uStatTxDelayed = 0;
7479 pState->uStatTxDelayExp = 0;
7480 pState->uStatTAD = 0;
7481 pState->uStatTID = 0;
7482 pState->uStatRAD = 0;
7483 pState->uStatRID = 0;
7484 pState->uStatRxFrm = 0;
7485 pState->uStatTxFrm = 0;
7486 pState->uStatDescCtx = 0;
7487 pState->uStatDescDat = 0;
7488 pState->uStatDescLeg = 0;
7489 pState->uStatTx1514 = 0;
7490 pState->uStatTx2962 = 0;
7491 pState->uStatTx4410 = 0;
7492 pState->uStatTx5858 = 0;
7493 pState->uStatTx7306 = 0;
7494 pState->uStatTx8754 = 0;
7495 pState->uStatTx16384 = 0;
7496 pState->uStatTx32768 = 0;
7497 pState->uStatTxLarge = 0;
7498 pState->uStatMaxTxDelay = 0;
7499#endif /* E1K_INT_STATS */
7500
7501 /* Interfaces */
7502 pState->IBase.pfnQueryInterface = e1kQueryInterface;
7503
7504 pState->INetworkDown.pfnWaitReceiveAvail = e1kNetworkDown_WaitReceiveAvail;
7505 pState->INetworkDown.pfnReceive = e1kNetworkDown_Receive;
7506 pState->INetworkDown.pfnXmitPending = e1kNetworkDown_XmitPending;
7507
7508 pState->ILeds.pfnQueryStatusLed = e1kQueryStatusLed;
7509
7510 pState->INetworkConfig.pfnGetMac = e1kGetMac;
7511 pState->INetworkConfig.pfnGetLinkState = e1kGetLinkState;
7512 pState->INetworkConfig.pfnSetLinkState = e1kSetLinkState;
7513
7514 /* Initialize the EEPROM */
7515 pState->eeprom.init(pState->macConfigured);
7516
7517 /* Initialize internal PHY */
7518 Phy::init(&pState->phy, iInstance,
7519 pState->eChip == E1K_CHIP_82543GC?
7520 PHY_EPID_M881000 : PHY_EPID_M881011);
7521 Phy::setLinkStatus(&pState->phy, pState->fCableConnected);
7522
7523 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7524 NULL, e1kLiveExec, NULL,
7525 e1kSavePrep, e1kSaveExec, NULL,
7526 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7527 if (RT_FAILURE(rc))
7528 return rc;
7529
7530 /* Initialize critical section */
7531 rc = PDMDevHlpCritSectInit(pDevIns, &pState->cs, RT_SRC_POS, "%s", pState->szInstance);
7532 if (RT_FAILURE(rc))
7533 return rc;
7534 rc = PDMDevHlpCritSectInit(pDevIns, &pState->csRx, RT_SRC_POS, "%sRX", pState->szInstance);
7535 if (RT_FAILURE(rc))
7536 return rc;
7537#ifdef E1K_WITH_TX_CS
7538 rc = PDMDevHlpCritSectInit(pDevIns, &pState->csTx, RT_SRC_POS, "%sTX", pState->szInstance);
7539 if (RT_FAILURE(rc))
7540 return rc;
7541#endif /* E1K_WITH_TX_CS */
7542
7543 /* Set PCI config registers */
7544 e1kConfigurePCI(pState->pciDevice, pState->eChip);
7545 /* Register PCI device */
7546 rc = PDMDevHlpPCIRegister(pDevIns, &pState->pciDevice);
7547 if (RT_FAILURE(rc))
7548 return rc;
7549
7550#ifdef E1K_WITH_MSI
7551 PDMMSIREG aMsiReg;
7552 aMsiReg.cMsiVectors = 1;
7553 aMsiReg.iMsiCapOffset = 0x80;
7554 aMsiReg.iMsiNextOffset = 0x0;
7555 aMsiReg.fMsi64bit = false;
7556 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg);
7557 AssertRC(rc);
7558 if (RT_FAILURE (rc))
7559 return rc;
7560#endif
7561
7562
7563 /* Map our registers to memory space (region 0, see e1kConfigurePCI)*/
7564 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, E1K_MM_SIZE,
7565 PCI_ADDRESS_SPACE_MEM, e1kMap);
7566 if (RT_FAILURE(rc))
7567 return rc;
7568 /* Map our registers to IO space (region 2, see e1kConfigurePCI) */
7569 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE,
7570 PCI_ADDRESS_SPACE_IO, e1kMap);
7571 if (RT_FAILURE(rc))
7572 return rc;
7573
7574 /* Create transmit queue */
7575 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7576 e1kTxQueueConsumer, true, "E1000-Xmit", &pState->pTxQueueR3);
7577 if (RT_FAILURE(rc))
7578 return rc;
7579 pState->pTxQueueR0 = PDMQueueR0Ptr(pState->pTxQueueR3);
7580 pState->pTxQueueRC = PDMQueueRCPtr(pState->pTxQueueR3);
7581
7582 /* Create the RX notifier signaller. */
7583 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7584 e1kCanRxQueueConsumer, true, "E1000-Rcv", &pState->pCanRxQueueR3);
7585 if (RT_FAILURE(rc))
7586 return rc;
7587 pState->pCanRxQueueR0 = PDMQueueR0Ptr(pState->pCanRxQueueR3);
7588 pState->pCanRxQueueRC = PDMQueueRCPtr(pState->pCanRxQueueR3);
7589
7590#ifdef E1K_TX_DELAY
7591 /* Create Transmit Delay Timer */
7592 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxDelayTimer, pState,
7593 TMTIMER_FLAGS_NO_CRIT_SECT,
7594 "E1000 Transmit Delay Timer", &pState->pTXDTimerR3);
7595 if (RT_FAILURE(rc))
7596 return rc;
7597 pState->pTXDTimerR0 = TMTimerR0Ptr(pState->pTXDTimerR3);
7598 pState->pTXDTimerRC = TMTimerRCPtr(pState->pTXDTimerR3);
7599 TMR3TimerSetCritSect(pState->pTXDTimerR3, &pState->csTx);
7600#endif /* E1K_TX_DELAY */
7601
7602#ifdef E1K_USE_TX_TIMERS
7603 /* Create Transmit Interrupt Delay Timer */
7604 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxIntDelayTimer, pState,
7605 TMTIMER_FLAGS_NO_CRIT_SECT,
7606 "E1000 Transmit Interrupt Delay Timer", &pState->pTIDTimerR3);
7607 if (RT_FAILURE(rc))
7608 return rc;
7609 pState->pTIDTimerR0 = TMTimerR0Ptr(pState->pTIDTimerR3);
7610 pState->pTIDTimerRC = TMTimerRCPtr(pState->pTIDTimerR3);
7611
7612# ifndef E1K_NO_TAD
7613 /* Create Transmit Absolute Delay Timer */
7614 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxAbsDelayTimer, pState,
7615 TMTIMER_FLAGS_NO_CRIT_SECT,
7616 "E1000 Transmit Absolute Delay Timer", &pState->pTADTimerR3);
7617 if (RT_FAILURE(rc))
7618 return rc;
7619 pState->pTADTimerR0 = TMTimerR0Ptr(pState->pTADTimerR3);
7620 pState->pTADTimerRC = TMTimerRCPtr(pState->pTADTimerR3);
7621# endif /* E1K_NO_TAD */
7622#endif /* E1K_USE_TX_TIMERS */
7623
7624#ifdef E1K_USE_RX_TIMERS
7625 /* Create Receive Interrupt Delay Timer */
7626 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxIntDelayTimer, pState,
7627 TMTIMER_FLAGS_NO_CRIT_SECT,
7628 "E1000 Receive Interrupt Delay Timer", &pState->pRIDTimerR3);
7629 if (RT_FAILURE(rc))
7630 return rc;
7631 pState->pRIDTimerR0 = TMTimerR0Ptr(pState->pRIDTimerR3);
7632 pState->pRIDTimerRC = TMTimerRCPtr(pState->pRIDTimerR3);
7633
7634 /* Create Receive Absolute Delay Timer */
7635 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxAbsDelayTimer, pState,
7636 TMTIMER_FLAGS_NO_CRIT_SECT,
7637 "E1000 Receive Absolute Delay Timer", &pState->pRADTimerR3);
7638 if (RT_FAILURE(rc))
7639 return rc;
7640 pState->pRADTimerR0 = TMTimerR0Ptr(pState->pRADTimerR3);
7641 pState->pRADTimerRC = TMTimerRCPtr(pState->pRADTimerR3);
7642#endif /* E1K_USE_RX_TIMERS */
7643
7644 /* Create Late Interrupt Timer */
7645 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLateIntTimer, pState,
7646 TMTIMER_FLAGS_NO_CRIT_SECT,
7647 "E1000 Late Interrupt Timer", &pState->pIntTimerR3);
7648 if (RT_FAILURE(rc))
7649 return rc;
7650 pState->pIntTimerR0 = TMTimerR0Ptr(pState->pIntTimerR3);
7651 pState->pIntTimerRC = TMTimerRCPtr(pState->pIntTimerR3);
7652
7653 /* Create Link Up Timer */
7654 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pState,
7655 TMTIMER_FLAGS_NO_CRIT_SECT,
7656 "E1000 Link Up Timer", &pState->pLUTimerR3);
7657 if (RT_FAILURE(rc))
7658 return rc;
7659 pState->pLUTimerR0 = TMTimerR0Ptr(pState->pLUTimerR3);
7660 pState->pLUTimerRC = TMTimerRCPtr(pState->pLUTimerR3);
7661
7662 /* Register the info item */
7663 char szTmp[20];
7664 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7665 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
7666
7667 /* Status driver */
7668 PPDMIBASE pBase;
7669 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pState->IBase, &pBase, "Status Port");
7670 if (RT_FAILURE(rc))
7671 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7672 pState->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7673
7674 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pState->IBase, &pState->pDrvBase, "Network Port");
7675 if (RT_SUCCESS(rc))
7676 {
7677 if (rc == VINF_NAT_DNS)
7678 {
7679 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7680 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7681 }
7682 pState->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMINETWORKUP);
7683 AssertMsgReturn(pState->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7684 VERR_PDM_MISSING_INTERFACE_BELOW);
7685
7686 pState->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7687 pState->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7688 }
7689 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7690 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7691 {
7692 /* No error! */
7693 E1kLog(("%s This adapter is not attached to any network!\n", INSTANCE(pState)));
7694 }
7695 else
7696 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
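/*
 * LUN#0 is the actual network attachment. VERR_PDM_NO_ATTACHED_DRIVER (or a
 * missing driver name) simply means the adapter is configured as "not
 * connected", which is not an error. When a driver is attached, the ring-0
 * and raw-mode PDMINETWORKUP pointers queried above allow transmitting
 * without first dropping back to ring-3.
 */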
7697
7698 rc = RTSemEventCreate(&pState->hEventMoreRxDescAvail);
7699 if (RT_FAILURE(rc))
7700 return rc;
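/*
 * hEventMoreRxDescAvail is signalled when the guest makes more RX descriptors
 * available; the receive path presumably blocks on it while the RX ring is
 * full (cf. StatRxOverflowWakeup below).
 */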
7701
7702 rc = e1kInitDebugHelpers();
7703 if (RT_FAILURE(rc))
7704 return rc;
7705
7706 e1kHardReset(pState);
7707
7708#if defined(VBOX_WITH_STATISTICS)
7709 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
7710 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
7711 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
7712 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
7713 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
7714 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
7715 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
7716 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
7717 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
7718 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
7719 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
7720 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occurred", iInstance);
7721 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
7722 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
7723 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
7724 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
7725 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
7726 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
7727 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
7728 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
7729#endif /* VBOX_WITH_STATISTICS */
7730 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
7731#if defined(VBOX_WITH_STATISTICS)
7732 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
7733 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
7734#endif /* VBOX_WITH_STATISTICS */
7735 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
7736#if defined(VBOX_WITH_STATISTICS)
7737 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
7738 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
7739
7740 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContextNormal", iInstance);
7741 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
7742 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
7743 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
7744 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
7745 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
7746 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
7747 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
7748 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
7749#endif /* VBOX_WITH_STATISTICS */
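/*
 * All counters above appear under "/Devices/E1k<instance>/" in the VM
 * statistics; most of them require a VBOX_WITH_STATISTICS build, while the
 * byte counters are always registered. On builds that provide the debugvm
 * command they can typically be inspected with, for example:
 *
 *   VBoxManage debugvm <vmname> statistics --pattern "/Devices/E1k0/*"
 *
 * (command availability and exact option names may differ between releases).
 */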
7750
7751 return VINF_SUCCESS;
7752}
7753
7754/**
7755 * The device registration structure.
7756 */
7757const PDMDEVREG g_DeviceE1000 =
7758{
7759 /* Structure version. PDM_DEVREG_VERSION defines the current version. */
7760 PDM_DEVREG_VERSION,
7761 /* Device name. */
7762 "e1000",
7763 /* Name of guest context module (no path).
7764 * Only evaluated if PDM_DEVREG_FLAGS_RC is set. */
7765 "VBoxDDGC.gc",
7766 /* Name of ring-0 module (no path).
7767 * Only evaluated if PDM_DEVREG_FLAGS_R0 is set. */
7768 "VBoxDDR0.r0",
7769 /* The description of the device. The UTF-8 string pointed to shall, like this structure,
7770 * remain unchanged from registration till VM destruction. */
7771 "Intel PRO/1000 MT Desktop Ethernet.\n",
7772
7773 /* Flags, combination of the PDM_DEVREG_FLAGS_* \#defines. */
7774 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
7775 /* Device class(es), combination of the PDM_DEVREG_CLASS_* \#defines. */
7776 PDM_DEVREG_CLASS_NETWORK,
7777 /* Maximum number of instances (per VM). */
7778 ~0U,
7779 /* Size of the instance data. */
7780 sizeof(E1KSTATE),
7781
7782 /* Construct instance - required. */
7783 e1kConstruct,
7784 /* Destruct instance - optional. */
7785 e1kDestruct,
7786 /* Relocation command - optional. */
7787 e1kRelocate,
7788 /* I/O Control interface - optional. */
7789 NULL,
7790 /* Power on notification - optional. */
7791 NULL,
7792 /* Reset notification - optional. */
7793 e1kReset,
7794 /* Suspend notification - optional. */
7795 e1kSuspend,
7796 /* Resume notification - optional. */
7797 NULL,
7798 /* Attach command - optional. */
7799 e1kAttach,
7800 /* Detach notification - optional. */
7801 e1kDetach,
7802 /* Query a LUN base interface - optional. */
7803 NULL,
7804 /* Init complete notification - optional. */
7805 NULL,
7806 /* Power off notification - optional. */
7807 e1kPowerOff,
7808 /* pfnSoftReset */
7809 NULL,
7810 /* u32VersionEnd */
7811 PDM_DEVREG_VERSION
7812};
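/*
 * g_DeviceE1000 is presumably handed to PDM by the VBoxDD module's device
 * registration callback, after which PDM instantiates the device once per
 * configured adapter of this type (the "82540EM"/PRO/1000 MT Desktop NIC,
 * selectable in the frontend, e.g. roughly
 * "VBoxManage modifyvm <vm> --nictype1 82540EM"). The ~0U above means the
 * number of instances per VM is effectively unlimited.
 */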
7813
7814#endif /* IN_RING3 */
7815#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */