VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp @ 40998

Last change on this file: r40998, checked in by vboxsync: "DevE1000: uninitialized variable"

1/* $Id: DevE1000.cpp 40998 2012-04-20 06:29:19Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2011 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28#define LOG_GROUP LOG_GROUP_DEV_E1000
29
30//#define E1kLogRel(a) LogRel(a)
31#define E1kLogRel(a)
32
33/* Options *******************************************************************/
34/*
35 * E1K_INIT_RA0 forces E1000 to set the first entry in the Receive Address filter
36 * table to the MAC address obtained from CFGM. Most guests read the MAC address from
37 * EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend on it
38 * being already set (see #4657).
39 */
40#define E1K_INIT_RA0
41/*
42 * E1K_LSC_ON_SLU causes E1000 to generate a Link Status Change interrupt when
43 * the guest driver brings up the link via the STATUS.LU bit. Again the only guest
44 * that requires it is Mac OS X (see #4657).
45 */
46#define E1K_LSC_ON_SLU
47/*
48 * E1K_ITR_ENABLED reduces the number of interrupts generated by E1000 if a
49 * guest driver requests it by writing a non-zero value to the Interrupt
50 * Throttling Register (see section 13.4.18 in "8254x Family of Gigabit
51 * Ethernet Controllers Software Developer’s Manual").
52 */
53#define E1K_ITR_ENABLED
54/*
55 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
56 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
57 * register. Enabling it showed no positive effects on existing guests so it
58 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
59 * Ethernet Controllers Software Developer’s Manual" for more detailed
60 * explanation.
61 */
62//#define E1K_USE_TX_TIMERS
63/*
64 * E1K_NO_TAD disables one of the two timers enabled by E1K_USE_TX_TIMERS, the
65 * Transmit Absolute Delay timer. This timer sets the maximum time interval
66 * during which TX interrupts can be postponed (delayed). It has no effect
67 * if E1K_USE_TX_TIMERS is not defined.
68 */
69//#define E1K_NO_TAD
70/*
71 * E1K_REL_DEBUG enables debug logging at levels 1-3 (E1kLog, E1kLog2, E1kLog3) in release builds.
72 */
73//#define E1K_REL_DEBUG
74/*
75 * E1K_INT_STATS enables collection of internal statistics used for
76 * debugging delayed interrupts, etc.
77 */
78//#define E1K_INT_STATS
79/*
80 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
81 */
82//#define E1K_WITH_MSI
83/*
84 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
85 * single physical memory read (or two if it wraps around the end of the TX
86 * descriptor ring). It is required for proper functioning of bandwidth
87 * resource control as it makes it possible to compute the exact sizes of packets prior
88 * to allocating their buffers (see #5582).
89 */
90//#define E1K_WITH_TXD_CACHE 1
91/* End of Options ************************************************************/
92
93#ifdef E1K_WITH_TXD_CACHE
94/*
95 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
96 * in the state structure. It limits the number of descriptors loaded in one
97 * batch read. For example, a Windows XP guest uses about 5 descriptors per
98 * TSE packet.
99 */
100#define E1K_TXD_CACHE_SIZE 16u
101#endif /* E1K_WITH_TXD_CACHE */
102
103#include <iprt/crc.h>
104#include <iprt/ctype.h>
105#include <iprt/net.h>
106#include <iprt/semaphore.h>
107#include <iprt/string.h>
108#include <iprt/uuid.h>
109#include <VBox/vmm/pdmdev.h>
110#include <VBox/vmm/pdmnetifs.h>
111#include <VBox/vmm/pdmnetinline.h>
112#include <VBox/param.h>
113#include "VBoxDD.h"
114
115#include "DevEEPROM.h"
116#include "DevE1000Phy.h"
117
118/* Little helpers ************************************************************/
119#undef htons
120#undef ntohs
121#undef htonl
122#undef ntohl
123#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
124#define ntohs(x) htons(x)
125#define htonl(x) ASMByteSwapU32(x)
126#define ntohl(x) htonl(x)
127
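/*
 * Editor's note (not part of the original file): a quick illustration of the
 * byte-order helpers above. These macros swap bytes unconditionally and do not
 * check host endianness; the values below are arbitrary.
 */
#if 0 /* illustration only */
    uint16_t u16 = htons(0x1234);                   /* 0x3412 */
    uint32_t u32 = htonl(UINT32_C(0xAABBCCDD));     /* 0xDDCCBBAA via ASMByteSwapU32 */
#endif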
128#ifndef DEBUG
129# ifdef E1K_REL_DEBUG
130# define DEBUG
131# define E1kLog(a) LogRel(a)
132# define E1kLog2(a) LogRel(a)
133# define E1kLog3(a) LogRel(a)
134//# define E1kLog3(a) do {} while (0)
135# else
136# define E1kLog(a) do {} while (0)
137# define E1kLog2(a) do {} while (0)
138# define E1kLog3(a) do {} while (0)
139# endif
140#else
141# define E1kLog(a) Log(a)
142# define E1kLog2(a) Log2(a)
143# define E1kLog3(a) Log3(a)
144//# define E1kLog(a) do {} while (0)
145//# define E1kLog2(a) do {} while (0)
146//# define E1kLog3(a) do {} while (0)
147#endif
148
149//#undef DEBUG
150
151#define INSTANCE(pState) pState->szInstance
152#define STATE_TO_DEVINS(pState) (((E1KSTATE *)pState)->CTX_SUFF(pDevIns))
153#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
154
155#define E1K_INC_CNT32(cnt) \
156do { \
157 if (cnt < UINT32_MAX) \
158 cnt++; \
159} while (0)
160
161#define E1K_ADD_CNT64(cntLo, cntHi, val) \
162do { \
163 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
164 uint64_t tmp = u64Cnt; \
165 u64Cnt += val; \
166 if (tmp > u64Cnt ) \
167 u64Cnt = UINT64_MAX; \
168 cntLo = (uint32_t)u64Cnt; \
169 cntHi = (uint32_t)(u64Cnt >> 32); \
170} while (0)
171
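/*
 * Editor's note (not part of the original file): a minimal sketch of how the
 * saturating statistics-counter helpers above behave. Variable names are
 * illustrative only.
 */
#if 0 /* illustration only */
    uint32_t cPkts = UINT32_MAX - 1;
    E1K_INC_CNT32(cPkts);                    /* -> UINT32_MAX */
    E1K_INC_CNT32(cPkts);                    /* sticks at UINT32_MAX, no wrap-around */

    uint32_t cOctetsLo = UINT32_MAX, cOctetsHi = 0;
    E1K_ADD_CNT64(cOctetsLo, cOctetsHi, 10); /* carries into the high half: Lo == 9, Hi == 1 */
#endif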
172#ifdef E1K_INT_STATS
173# define E1K_INC_ISTAT_CNT(cnt) ++cnt
174#else /* E1K_INT_STATS */
175# define E1K_INC_ISTAT_CNT(cnt)
176#endif /* E1K_INT_STATS */
177
178
179/*****************************************************************************/
180
181typedef uint32_t E1KCHIP;
182#define E1K_CHIP_82540EM 0
183#define E1K_CHIP_82543GC 1
184#define E1K_CHIP_82545EM 2
185
186struct E1kChips
187{
188 uint16_t uPCIVendorId;
189 uint16_t uPCIDeviceId;
190 uint16_t uPCISubsystemVendorId;
191 uint16_t uPCISubsystemId;
192 const char *pcszName;
193} g_Chips[] =
194{
195 /* Vendor Device SSVendor SubSys Name */
196 { 0x8086,
197 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
198#ifdef E1K_WITH_MSI
199 0x105E,
200#else
201 0x100E,
202#endif
203 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
204 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
205 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
206};
207
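/*
 * Editor's note (not part of the original file): a hedged sketch of how this
 * table is meant to be used; the emulated chip type (E1K_CHIP_xxx) serves as
 * an index into g_Chips when the PCI configuration space is set up.
 */
#if 0 /* illustration only */
    uint16_t uDeviceId = g_Chips[E1K_CHIP_82540EM].uPCIDeviceId; /* 0x100E, or 0x105E with E1K_WITH_MSI */
#endif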
208
209/* The size of register area mapped to I/O space */
210#define E1K_IOPORT_SIZE 0x8
211/* The size of memory-mapped register area */
212#define E1K_MM_SIZE 0x20000
213
214#define E1K_MAX_TX_PKT_SIZE 16288
215#define E1K_MAX_RX_PKT_SIZE 16384
216
217/*****************************************************************************/
218
219/** Gets the specified bits from the register. */
220#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
221#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
222#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
223#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
224#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
225
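/*
 * Editor's note (not part of the original file): a small usage sketch of the
 * bit-field helpers above, using the EERD field masks/shifts defined further
 * below. The value is arbitrary.
 */
#if 0 /* illustration only */
    uint32_t u32Eerd = 0x00002A00;                         /* ADDR field holds 0x2A */
    uint32_t uAddr   = GET_BITS_V(u32Eerd, EERD, ADDR);    /* (u32Eerd & EERD_ADDR_MASK) >> EERD_ADDR_SHIFT == 0x2A */
    SET_BITS_V(u32Eerd, EERD, DATA, 0xBEEF);               /* stores 0xBEEF in bits 31:16 */
#endif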
226#define CTRL_SLU 0x00000040
227#define CTRL_MDIO 0x00100000
228#define CTRL_MDC 0x00200000
229#define CTRL_MDIO_DIR 0x01000000
230#define CTRL_MDC_DIR 0x02000000
231#define CTRL_RESET 0x04000000
232#define CTRL_VME 0x40000000
233
234#define STATUS_LU 0x00000002
235#define STATUS_TXOFF 0x00000010
236
237#define EECD_EE_WIRES 0x0F
238#define EECD_EE_REQ 0x40
239#define EECD_EE_GNT 0x80
240
241#define EERD_START 0x00000001
242#define EERD_DONE 0x00000010
243#define EERD_DATA_MASK 0xFFFF0000
244#define EERD_DATA_SHIFT 16
245#define EERD_ADDR_MASK 0x0000FF00
246#define EERD_ADDR_SHIFT 8
247
248#define MDIC_DATA_MASK 0x0000FFFF
249#define MDIC_DATA_SHIFT 0
250#define MDIC_REG_MASK 0x001F0000
251#define MDIC_REG_SHIFT 16
252#define MDIC_PHY_MASK 0x03E00000
253#define MDIC_PHY_SHIFT 21
254#define MDIC_OP_WRITE 0x04000000
255#define MDIC_OP_READ 0x08000000
256#define MDIC_READY 0x10000000
257#define MDIC_INT_EN 0x20000000
258#define MDIC_ERROR 0x40000000
259
260#define TCTL_EN 0x00000002
261#define TCTL_PSP 0x00000008
262
263#define RCTL_EN 0x00000002
264#define RCTL_UPE 0x00000008
265#define RCTL_MPE 0x00000010
266#define RCTL_LPE 0x00000020
267#define RCTL_LBM_MASK 0x000000C0
268#define RCTL_LBM_SHIFT 6
269#define RCTL_RDMTS_MASK 0x00000300
270#define RCTL_RDMTS_SHIFT 8
271#define RCTL_LBM_TCVR 3 /**< PHY or external SerDes loopback. */
272#define RCTL_MO_MASK 0x00003000
273#define RCTL_MO_SHIFT 12
274#define RCTL_BAM 0x00008000
275#define RCTL_BSIZE_MASK 0x00030000
276#define RCTL_BSIZE_SHIFT 16
277#define RCTL_VFE 0x00040000
278#define RCTL_CFIEN 0x00080000
279#define RCTL_CFI 0x00100000
280#define RCTL_BSEX 0x02000000
281#define RCTL_SECRC 0x04000000
282
283#define ICR_TXDW 0x00000001
284#define ICR_TXQE 0x00000002
285#define ICR_LSC 0x00000004
286#define ICR_RXDMT0 0x00000010
287#define ICR_RXT0 0x00000080
288#define ICR_TXD_LOW 0x00008000
289#define RDTR_FPD 0x80000000
290
291#define PBA_st ((PBAST*)(pState->auRegs + PBA_IDX))
292typedef struct
293{
294 unsigned rxa : 7;
295 unsigned rxa_r : 9;
296 unsigned txa : 16;
297} PBAST;
298AssertCompileSize(PBAST, 4);
299
300#define TXDCTL_WTHRESH_MASK 0x003F0000
301#define TXDCTL_WTHRESH_SHIFT 16
302#define TXDCTL_LWTHRESH_MASK 0xFE000000
303#define TXDCTL_LWTHRESH_SHIFT 25
304
305#define RXCSUM_PCSS_MASK 0x000000FF
306#define RXCSUM_PCSS_SHIFT 0
307
308/* Register access macros ****************************************************/
309#define CTRL pState->auRegs[CTRL_IDX]
310#define STATUS pState->auRegs[STATUS_IDX]
311#define EECD pState->auRegs[EECD_IDX]
312#define EERD pState->auRegs[EERD_IDX]
313#define CTRL_EXT pState->auRegs[CTRL_EXT_IDX]
314#define FLA pState->auRegs[FLA_IDX]
315#define MDIC pState->auRegs[MDIC_IDX]
316#define FCAL pState->auRegs[FCAL_IDX]
317#define FCAH pState->auRegs[FCAH_IDX]
318#define FCT pState->auRegs[FCT_IDX]
319#define VET pState->auRegs[VET_IDX]
320#define ICR pState->auRegs[ICR_IDX]
321#define ITR pState->auRegs[ITR_IDX]
322#define ICS pState->auRegs[ICS_IDX]
323#define IMS pState->auRegs[IMS_IDX]
324#define IMC pState->auRegs[IMC_IDX]
325#define RCTL pState->auRegs[RCTL_IDX]
326#define FCTTV pState->auRegs[FCTTV_IDX]
327#define TXCW pState->auRegs[TXCW_IDX]
328#define RXCW pState->auRegs[RXCW_IDX]
329#define TCTL pState->auRegs[TCTL_IDX]
330#define TIPG pState->auRegs[TIPG_IDX]
331#define AIFS pState->auRegs[AIFS_IDX]
332#define LEDCTL pState->auRegs[LEDCTL_IDX]
333#define PBA pState->auRegs[PBA_IDX]
334#define FCRTL pState->auRegs[FCRTL_IDX]
335#define FCRTH pState->auRegs[FCRTH_IDX]
336#define RDFH pState->auRegs[RDFH_IDX]
337#define RDFT pState->auRegs[RDFT_IDX]
338#define RDFHS pState->auRegs[RDFHS_IDX]
339#define RDFTS pState->auRegs[RDFTS_IDX]
340#define RDFPC pState->auRegs[RDFPC_IDX]
341#define RDBAL pState->auRegs[RDBAL_IDX]
342#define RDBAH pState->auRegs[RDBAH_IDX]
343#define RDLEN pState->auRegs[RDLEN_IDX]
344#define RDH pState->auRegs[RDH_IDX]
345#define RDT pState->auRegs[RDT_IDX]
346#define RDTR pState->auRegs[RDTR_IDX]
347#define RXDCTL pState->auRegs[RXDCTL_IDX]
348#define RADV pState->auRegs[RADV_IDX]
349#define RSRPD pState->auRegs[RSRPD_IDX]
350#define TXDMAC pState->auRegs[TXDMAC_IDX]
351#define TDFH pState->auRegs[TDFH_IDX]
352#define TDFT pState->auRegs[TDFT_IDX]
353#define TDFHS pState->auRegs[TDFHS_IDX]
354#define TDFTS pState->auRegs[TDFTS_IDX]
355#define TDFPC pState->auRegs[TDFPC_IDX]
356#define TDBAL pState->auRegs[TDBAL_IDX]
357#define TDBAH pState->auRegs[TDBAH_IDX]
358#define TDLEN pState->auRegs[TDLEN_IDX]
359#define TDH pState->auRegs[TDH_IDX]
360#define TDT pState->auRegs[TDT_IDX]
361#define TIDV pState->auRegs[TIDV_IDX]
362#define TXDCTL pState->auRegs[TXDCTL_IDX]
363#define TADV pState->auRegs[TADV_IDX]
364#define TSPMT pState->auRegs[TSPMT_IDX]
365#define CRCERRS pState->auRegs[CRCERRS_IDX]
366#define ALGNERRC pState->auRegs[ALGNERRC_IDX]
367#define SYMERRS pState->auRegs[SYMERRS_IDX]
368#define RXERRC pState->auRegs[RXERRC_IDX]
369#define MPC pState->auRegs[MPC_IDX]
370#define SCC pState->auRegs[SCC_IDX]
371#define ECOL pState->auRegs[ECOL_IDX]
372#define MCC pState->auRegs[MCC_IDX]
373#define LATECOL pState->auRegs[LATECOL_IDX]
374#define COLC pState->auRegs[COLC_IDX]
375#define DC pState->auRegs[DC_IDX]
376#define TNCRS pState->auRegs[TNCRS_IDX]
377#define SEC pState->auRegs[SEC_IDX]
378#define CEXTERR pState->auRegs[CEXTERR_IDX]
379#define RLEC pState->auRegs[RLEC_IDX]
380#define XONRXC pState->auRegs[XONRXC_IDX]
381#define XONTXC pState->auRegs[XONTXC_IDX]
382#define XOFFRXC pState->auRegs[XOFFRXC_IDX]
383#define XOFFTXC pState->auRegs[XOFFTXC_IDX]
384#define FCRUC pState->auRegs[FCRUC_IDX]
385#define PRC64 pState->auRegs[PRC64_IDX]
386#define PRC127 pState->auRegs[PRC127_IDX]
387#define PRC255 pState->auRegs[PRC255_IDX]
388#define PRC511 pState->auRegs[PRC511_IDX]
389#define PRC1023 pState->auRegs[PRC1023_IDX]
390#define PRC1522 pState->auRegs[PRC1522_IDX]
391#define GPRC pState->auRegs[GPRC_IDX]
392#define BPRC pState->auRegs[BPRC_IDX]
393#define MPRC pState->auRegs[MPRC_IDX]
394#define GPTC pState->auRegs[GPTC_IDX]
395#define GORCL pState->auRegs[GORCL_IDX]
396#define GORCH pState->auRegs[GORCH_IDX]
397#define GOTCL pState->auRegs[GOTCL_IDX]
398#define GOTCH pState->auRegs[GOTCH_IDX]
399#define RNBC pState->auRegs[RNBC_IDX]
400#define RUC pState->auRegs[RUC_IDX]
401#define RFC pState->auRegs[RFC_IDX]
402#define ROC pState->auRegs[ROC_IDX]
403#define RJC pState->auRegs[RJC_IDX]
404#define MGTPRC pState->auRegs[MGTPRC_IDX]
405#define MGTPDC pState->auRegs[MGTPDC_IDX]
406#define MGTPTC pState->auRegs[MGTPTC_IDX]
407#define TORL pState->auRegs[TORL_IDX]
408#define TORH pState->auRegs[TORH_IDX]
409#define TOTL pState->auRegs[TOTL_IDX]
410#define TOTH pState->auRegs[TOTH_IDX]
411#define TPR pState->auRegs[TPR_IDX]
412#define TPT pState->auRegs[TPT_IDX]
413#define PTC64 pState->auRegs[PTC64_IDX]
414#define PTC127 pState->auRegs[PTC127_IDX]
415#define PTC255 pState->auRegs[PTC255_IDX]
416#define PTC511 pState->auRegs[PTC511_IDX]
417#define PTC1023 pState->auRegs[PTC1023_IDX]
418#define PTC1522 pState->auRegs[PTC1522_IDX]
419#define MPTC pState->auRegs[MPTC_IDX]
420#define BPTC pState->auRegs[BPTC_IDX]
421#define TSCTC pState->auRegs[TSCTC_IDX]
422#define TSCTFC pState->auRegs[TSCTFC_IDX]
423#define RXCSUM pState->auRegs[RXCSUM_IDX]
424#define WUC pState->auRegs[WUC_IDX]
425#define WUFC pState->auRegs[WUFC_IDX]
426#define WUS pState->auRegs[WUS_IDX]
427#define MANC pState->auRegs[MANC_IDX]
428#define IPAV pState->auRegs[IPAV_IDX]
429#define WUPL pState->auRegs[WUPL_IDX]
430
431/**
432 * Indices of memory-mapped registers in register table
433 */
434typedef enum
435{
436 CTRL_IDX,
437 STATUS_IDX,
438 EECD_IDX,
439 EERD_IDX,
440 CTRL_EXT_IDX,
441 FLA_IDX,
442 MDIC_IDX,
443 FCAL_IDX,
444 FCAH_IDX,
445 FCT_IDX,
446 VET_IDX,
447 ICR_IDX,
448 ITR_IDX,
449 ICS_IDX,
450 IMS_IDX,
451 IMC_IDX,
452 RCTL_IDX,
453 FCTTV_IDX,
454 TXCW_IDX,
455 RXCW_IDX,
456 TCTL_IDX,
457 TIPG_IDX,
458 AIFS_IDX,
459 LEDCTL_IDX,
460 PBA_IDX,
461 FCRTL_IDX,
462 FCRTH_IDX,
463 RDFH_IDX,
464 RDFT_IDX,
465 RDFHS_IDX,
466 RDFTS_IDX,
467 RDFPC_IDX,
468 RDBAL_IDX,
469 RDBAH_IDX,
470 RDLEN_IDX,
471 RDH_IDX,
472 RDT_IDX,
473 RDTR_IDX,
474 RXDCTL_IDX,
475 RADV_IDX,
476 RSRPD_IDX,
477 TXDMAC_IDX,
478 TDFH_IDX,
479 TDFT_IDX,
480 TDFHS_IDX,
481 TDFTS_IDX,
482 TDFPC_IDX,
483 TDBAL_IDX,
484 TDBAH_IDX,
485 TDLEN_IDX,
486 TDH_IDX,
487 TDT_IDX,
488 TIDV_IDX,
489 TXDCTL_IDX,
490 TADV_IDX,
491 TSPMT_IDX,
492 CRCERRS_IDX,
493 ALGNERRC_IDX,
494 SYMERRS_IDX,
495 RXERRC_IDX,
496 MPC_IDX,
497 SCC_IDX,
498 ECOL_IDX,
499 MCC_IDX,
500 LATECOL_IDX,
501 COLC_IDX,
502 DC_IDX,
503 TNCRS_IDX,
504 SEC_IDX,
505 CEXTERR_IDX,
506 RLEC_IDX,
507 XONRXC_IDX,
508 XONTXC_IDX,
509 XOFFRXC_IDX,
510 XOFFTXC_IDX,
511 FCRUC_IDX,
512 PRC64_IDX,
513 PRC127_IDX,
514 PRC255_IDX,
515 PRC511_IDX,
516 PRC1023_IDX,
517 PRC1522_IDX,
518 GPRC_IDX,
519 BPRC_IDX,
520 MPRC_IDX,
521 GPTC_IDX,
522 GORCL_IDX,
523 GORCH_IDX,
524 GOTCL_IDX,
525 GOTCH_IDX,
526 RNBC_IDX,
527 RUC_IDX,
528 RFC_IDX,
529 ROC_IDX,
530 RJC_IDX,
531 MGTPRC_IDX,
532 MGTPDC_IDX,
533 MGTPTC_IDX,
534 TORL_IDX,
535 TORH_IDX,
536 TOTL_IDX,
537 TOTH_IDX,
538 TPR_IDX,
539 TPT_IDX,
540 PTC64_IDX,
541 PTC127_IDX,
542 PTC255_IDX,
543 PTC511_IDX,
544 PTC1023_IDX,
545 PTC1522_IDX,
546 MPTC_IDX,
547 BPTC_IDX,
548 TSCTC_IDX,
549 TSCTFC_IDX,
550 RXCSUM_IDX,
551 WUC_IDX,
552 WUFC_IDX,
553 WUS_IDX,
554 MANC_IDX,
555 IPAV_IDX,
556 WUPL_IDX,
557 MTA_IDX,
558 RA_IDX,
559 VFTA_IDX,
560 IP4AT_IDX,
561 IP6AT_IDX,
562 WUPM_IDX,
563 FFLT_IDX,
564 FFMT_IDX,
565 FFVT_IDX,
566 PBM_IDX,
567 RA_82542_IDX,
568 MTA_82542_IDX,
569 VFTA_82542_IDX,
570 E1K_NUM_OF_REGS
571} E1kRegIndex;
572
573#define E1K_NUM_OF_32BIT_REGS MTA_IDX
574
575
576/**
577 * Define E1000-specific EEPROM layout.
578 */
579class E1kEEPROM
580{
581 public:
582 EEPROM93C46 eeprom;
583
584#ifdef IN_RING3
585 /**
586 * Initialize EEPROM content.
587 *
588 * @param macAddr MAC address of E1000.
589 */
590 void init(RTMAC &macAddr)
591 {
592 eeprom.init();
593 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
594 eeprom.m_au16Data[0x04] = 0xFFFF;
595 /*
596 * bit 3 - full support for power management
597 * bit 10 - full duplex
598 */
599 eeprom.m_au16Data[0x0A] = 0x4408;
600 eeprom.m_au16Data[0x0B] = 0x001E;
601 eeprom.m_au16Data[0x0C] = 0x8086;
602 eeprom.m_au16Data[0x0D] = 0x100E;
603 eeprom.m_au16Data[0x0E] = 0x8086;
604 eeprom.m_au16Data[0x0F] = 0x3040;
605 eeprom.m_au16Data[0x21] = 0x7061;
606 eeprom.m_au16Data[0x22] = 0x280C;
607 eeprom.m_au16Data[0x23] = 0x00C8;
608 eeprom.m_au16Data[0x24] = 0x00C8;
609 eeprom.m_au16Data[0x2F] = 0x0602;
610 updateChecksum();
611 };
612
613 /**
614 * Compute the checksum as required by E1000 and store it
615 * in the last word.
616 */
617 void updateChecksum()
618 {
619 uint16_t u16Checksum = 0;
620
621 for (int i = 0; i < eeprom.SIZE-1; i++)
622 u16Checksum += eeprom.m_au16Data[i];
623 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
624 };
625
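/*
 * Editor's note (not part of the original file): the invariant established by
 * updateChecksum() above. Since the last word is set to 0xBABA minus the sum
 * of the preceding words, the 16-bit sum over the whole EEPROM equals 0xBABA,
 * which is what e1000 drivers verify when validating the EEPROM image.
 */
#if 0 /* illustration only */
    uint16_t u16Sum = 0;
    for (int i = 0; i < eeprom.SIZE; i++)
        u16Sum += eeprom.m_au16Data[i];
    Assert(u16Sum == 0xBABA);
#endif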
626 /**
627 * The first 6 bytes of the EEPROM contain the MAC address.
628 *
629 * @returns MAC address of E1000.
630 */
631 void getMac(PRTMAC pMac)
632 {
633 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
634 };
635
636 uint32_t read()
637 {
638 return eeprom.read();
639 }
640
641 void write(uint32_t u32Wires)
642 {
643 eeprom.write(u32Wires);
644 }
645
646 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
647 {
648 return eeprom.readWord(u32Addr, pu16Value);
649 }
650
651 int load(PSSMHANDLE pSSM)
652 {
653 return eeprom.load(pSSM);
654 }
655
656 void save(PSSMHANDLE pSSM)
657 {
658 eeprom.save(pSSM);
659 }
660#endif /* IN_RING3 */
661};
662
663
664#define E1K_SPEC_VLAN(s) (s & 0xFFF)
665#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
666#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
667
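/*
 * Editor's note (not part of the original file): decoding a 16-bit VLAN TCI
 * with the macros above, e.g. priority 5, CFI 0, VLAN ID 100 (0xA064).
 */
#if 0 /* illustration only */
    uint16_t u16Tci  = 0xA064;
    unsigned uVlanId = E1K_SPEC_VLAN(u16Tci); /* 0x064 == 100 */
    unsigned fCfi    = E1K_SPEC_CFI(u16Tci);  /* 0 */
    unsigned uPri    = E1K_SPEC_PRI(u16Tci);  /* 5 */
#endif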
668struct E1kRxDStatus
669{
670 /** @name Descriptor Status field (3.2.3.1)
671 * @{ */
672 unsigned fDD : 1; /**< Descriptor Done. */
673 unsigned fEOP : 1; /**< End of packet. */
674 unsigned fIXSM : 1; /**< Ignore checksum indication. */
675 unsigned fVP : 1; /**< VLAN, matches VET. */
676 unsigned : 1;
677 unsigned fTCPCS : 1; /**< TCP Checksum calculated on the packet. */
678 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
679 unsigned fPIF : 1; /**< Passed in-exact filter */
680 /** @} */
681 /** @name Descriptor Errors field (3.2.3.2)
682 * (Only valid when fEOP and fDD are set.)
683 * @{ */
684 unsigned fCE : 1; /**< CRC or alignment error. */
685 unsigned : 4; /**< Reserved, varies with different models... */
686 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
687 unsigned fIPE : 1; /**< IP Checksum error. */
688 unsigned fRXE : 1; /**< RX Data error. */
689 /** @} */
690 /** @name Descriptor Special field (3.2.3.3)
691 * @{ */
692 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
693 /** @} */
694};
695typedef struct E1kRxDStatus E1KRXDST;
696
697struct E1kRxDesc_st
698{
699 uint64_t u64BufAddr; /**< Address of data buffer */
700 uint16_t u16Length; /**< Length of data in buffer */
701 uint16_t u16Checksum; /**< Packet checksum */
702 E1KRXDST status;
703};
704typedef struct E1kRxDesc_st E1KRXDESC;
705AssertCompileSize(E1KRXDESC, 16);
706
707#define E1K_DTYP_LEGACY -1
708#define E1K_DTYP_CONTEXT 0
709#define E1K_DTYP_DATA 1
710
711struct E1kTDLegacy
712{
713 uint64_t u64BufAddr; /**< Address of data buffer */
714 struct TDLCmd_st
715 {
716 unsigned u16Length : 16;
717 unsigned u8CSO : 8;
718 /* CMD field : 8 */
719 unsigned fEOP : 1;
720 unsigned fIFCS : 1;
721 unsigned fIC : 1;
722 unsigned fRS : 1;
723 unsigned fRSV : 1;
724 unsigned fDEXT : 1;
725 unsigned fVLE : 1;
726 unsigned fIDE : 1;
727 } cmd;
728 struct TDLDw3_st
729 {
730 /* STA field */
731 unsigned fDD : 1;
732 unsigned fEC : 1;
733 unsigned fLC : 1;
734 unsigned fTURSV : 1;
735 /* RSV field */
736 unsigned u4RSV : 4;
737 /* CSS field */
738 unsigned u8CSS : 8;
739 /* Special field*/
740 unsigned u16Special: 16;
741 } dw3;
742};
743
744/**
745 * TCP/IP Context Transmit Descriptor, section 3.3.6.
746 */
747struct E1kTDContext
748{
749 struct CheckSum_st
750 {
751 /** TSE: Header start. !TSE: Checksum start. */
752 unsigned u8CSS : 8;
753 /** Checksum offset - where to store it. */
754 unsigned u8CSO : 8;
755 /** Checksum ending (inclusive) offset, 0 = end of packet. */
756 unsigned u16CSE : 16;
757 } ip;
758 struct CheckSum_st tu;
759 struct TDCDw2_st
760 {
761 /** TSE: The total number of payload bytes for this context. Sans header. */
762 unsigned u20PAYLEN : 20;
763 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
764 unsigned u4DTYP : 4;
765 /** TUCMD field, 8 bits
766 * @{ */
767 /** TSE: TCP (set) or UDP (clear). */
768 unsigned fTCP : 1;
769 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
770 * the IP header. Does not affect the checksumming.
771 * @remarks 82544GC/EI interprets a cleared field differently. */
772 unsigned fIP : 1;
773 /** TSE: TCP segmentation enable. When clear, the context only describes checksum offloading. */
774 unsigned fTSE : 1;
775 /** Report status (here it only applies to dw3.fDD). */
776 unsigned fRS : 1;
777 /** Reserved, MBZ. */
778 unsigned fRSV1 : 1;
779 /** Descriptor extension, must be set for this descriptor type. */
780 unsigned fDEXT : 1;
781 /** Reserved, MBZ. */
782 unsigned fRSV2 : 1;
783 /** Interrupt delay enable. */
784 unsigned fIDE : 1;
785 /** @} */
786 } dw2;
787 struct TDCDw3_st
788 {
789 /** Descriptor Done. */
790 unsigned fDD : 1;
791 /** Reserved, MBZ. */
792 unsigned u7RSV : 7;
793 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
794 unsigned u8HDRLEN : 8;
795 /** TSO: Maximum segment size. */
796 unsigned u16MSS : 16;
797 } dw3;
798};
799typedef struct E1kTDContext E1KTXCTX;
800
801/**
802 * TCP/IP Data Transmit Descriptor, section 3.3.7.
803 */
804struct E1kTDData
805{
806 uint64_t u64BufAddr; /**< Address of data buffer */
807 struct TDDCmd_st
808 {
809 /** The total length of data pointed to by this descriptor. */
810 unsigned u20DTALEN : 20;
811 /** The descriptor type - E1K_DTYP_DATA (1). */
812 unsigned u4DTYP : 4;
813 /** @name DCMD field, 8 bits (3.3.7.1).
814 * @{ */
815 /** End of packet. Note TSCTFC update. */
816 unsigned fEOP : 1;
817 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
818 unsigned fIFCS : 1;
819 /** Use the TSE context when set and the normal when clear. */
820 unsigned fTSE : 1;
821 /** Report status (dw3.STA). */
822 unsigned fRS : 1;
823 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
824 unsigned fRSV : 1;
825 /** Descriptor extension, must be set for this descriptor type. */
826 unsigned fDEXT : 1;
827 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
828 * Insert dw3.SPECIAL after ethernet header. */
829 unsigned fVLE : 1;
830 /** Interrupt delay enable. */
831 unsigned fIDE : 1;
832 /** @} */
833 } cmd;
834 struct TDDDw3_st
835 {
836 /** @name STA field (3.3.7.2)
837 * @{ */
838 unsigned fDD : 1; /**< Descriptor done. */
839 unsigned fEC : 1; /**< Excess collision. */
840 unsigned fLC : 1; /**< Late collision. */
841 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
842 unsigned fTURSV : 1;
843 /** @} */
844 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
845 /** @name POPTS (Packet Option) field (3.3.7.3)
846 * @{ */
847 unsigned fIXSM : 1; /**< Insert IP checksum. */
848 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
849 unsigned u6RSV : 6; /**< Reserved, MBZ. */
850 /** @} */
851 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
852 * Requires fEOP, fVLE and CTRL.VME to be set.
853 * @{ */
854 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
855 /** @} */
856 } dw3;
857};
858typedef struct E1kTDData E1KTXDAT;
859
860union E1kTxDesc
861{
862 struct E1kTDLegacy legacy;
863 struct E1kTDContext context;
864 struct E1kTDData data;
865};
866typedef union E1kTxDesc E1KTXDESC;
867AssertCompileSize(E1KTXDESC, 16);
868
869#define RA_CTL_AS 0x0003
870#define RA_CTL_AV 0x8000
871
872union E1kRecAddr
873{
874 uint32_t au32[32];
875 struct RAArray
876 {
877 uint8_t addr[6];
878 uint16_t ctl;
879 } array[16];
880};
881typedef struct E1kRecAddr::RAArray E1KRAELEM;
882typedef union E1kRecAddr E1KRA;
883AssertCompileSize(E1KRA, 8*16);
884
885#define E1K_IP_RF 0x8000 /* reserved fragment flag */
886#define E1K_IP_DF 0x4000 /* don't fragment flag */
887#define E1K_IP_MF 0x2000 /* more fragments flag */
888#define E1K_IP_OFFMASK 0x1fff /* mask for fragmenting bits */
889
890/** @todo use+extend RTNETIPV4 */
891struct E1kIpHeader
892{
893 /* type of service / version / header length */
894 uint16_t tos_ver_hl;
895 /* total length */
896 uint16_t total_len;
897 /* identification */
898 uint16_t ident;
899 /* fragment offset field */
900 uint16_t offset;
901 /* time to live / protocol*/
902 uint16_t ttl_proto;
903 /* checksum */
904 uint16_t chksum;
905 /* source IP address */
906 uint32_t src;
907 /* destination IP address */
908 uint32_t dest;
909};
910AssertCompileSize(struct E1kIpHeader, 20);
911
912#define E1K_TCP_FIN 0x01U
913#define E1K_TCP_SYN 0x02U
914#define E1K_TCP_RST 0x04U
915#define E1K_TCP_PSH 0x08U
916#define E1K_TCP_ACK 0x10U
917#define E1K_TCP_URG 0x20U
918#define E1K_TCP_ECE 0x40U
919#define E1K_TCP_CWR 0x80U
920
921#define E1K_TCP_FLAGS 0x3fU
922
923/** @todo use+extend RTNETTCP */
924struct E1kTcpHeader
925{
926 uint16_t src;
927 uint16_t dest;
928 uint32_t seqno;
929 uint32_t ackno;
930 uint16_t hdrlen_flags;
931 uint16_t wnd;
932 uint16_t chksum;
933 uint16_t urgp;
934};
935AssertCompileSize(struct E1kTcpHeader, 20);
936
937
938/** The current Saved state version. */
939#define E1K_SAVEDSTATE_VERSION 3
940/** Saved state version for VirtualBox 4.1 and earlier.
941 * These did not include VLAN tag fields. */
942#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
943/** Saved state version for VirtualBox 3.0 and earlier.
944 * This did not include the configuration part nor the E1kEEPROM. */
945#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
946
947/**
948 * Device state structure. Holds the current state of the device.
949 *
950 * @implements PDMINETWORKDOWN
951 * @implements PDMINETWORKCONFIG
952 * @implements PDMILEDPORTS
953 */
954struct E1kState_st
955{
956 char szInstance[8]; /**< Instance name, e.g. E1000#1. */
957 PDMIBASE IBase;
958 PDMINETWORKDOWN INetworkDown;
959 PDMINETWORKCONFIG INetworkConfig;
960 PDMILEDPORTS ILeds; /**< LED interface */
961 R3PTRTYPE(PPDMIBASE) pDrvBase; /**< Attached network driver. */
962 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
963
964 PPDMDEVINSR3 pDevInsR3; /**< Device instance - R3. */
965 R3PTRTYPE(PPDMQUEUE) pTxQueueR3; /**< Transmit queue - R3. */
966 R3PTRTYPE(PPDMQUEUE) pCanRxQueueR3; /**< Rx wakeup signaller - R3. */
967 PPDMINETWORKUPR3 pDrvR3; /**< Attached network driver - R3. */
968 PTMTIMERR3 pRIDTimerR3; /**< Receive Interrupt Delay Timer - R3. */
969 PTMTIMERR3 pRADTimerR3; /**< Receive Absolute Delay Timer - R3. */
970 PTMTIMERR3 pTIDTimerR3; /**< Transmit Interrupt Delay Timer - R3. */
971 PTMTIMERR3 pTADTimerR3; /**< Transmit Absolute Delay Timer - R3. */
972 PTMTIMERR3 pIntTimerR3; /**< Late Interrupt Timer - R3. */
973 PTMTIMERR3 pLUTimerR3; /**< Link Up(/Restore) Timer. */
974 /** The scatter / gather buffer used for the current outgoing packet - R3. */
975 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
976
977 PPDMDEVINSR0 pDevInsR0; /**< Device instance - R0. */
978 R0PTRTYPE(PPDMQUEUE) pTxQueueR0; /**< Transmit queue - R0. */
979 R0PTRTYPE(PPDMQUEUE) pCanRxQueueR0; /**< Rx wakeup signaller - R0. */
980 PPDMINETWORKUPR0 pDrvR0; /**< Attached network driver - R0. */
981 PTMTIMERR0 pRIDTimerR0; /**< Receive Interrupt Delay Timer - R0. */
982 PTMTIMERR0 pRADTimerR0; /**< Receive Absolute Delay Timer - R0. */
983 PTMTIMERR0 pTIDTimerR0; /**< Transmit Interrupt Delay Timer - R0. */
984 PTMTIMERR0 pTADTimerR0; /**< Transmit Absolute Delay Timer - R0. */
985 PTMTIMERR0 pIntTimerR0; /**< Late Interrupt Timer - R0. */
986 PTMTIMERR0 pLUTimerR0; /**< Link Up(/Restore) Timer - R0. */
987 /** The scatter / gather buffer used for the current outgoing packet - R0. */
988 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
989
990 PPDMDEVINSRC pDevInsRC; /**< Device instance - RC. */
991 RCPTRTYPE(PPDMQUEUE) pTxQueueRC; /**< Transmit queue - RC. */
992 RCPTRTYPE(PPDMQUEUE) pCanRxQueueRC; /**< Rx wakeup signaller - RC. */
993 PPDMINETWORKUPRC pDrvRC; /**< Attached network driver - RC. */
994 PTMTIMERRC pRIDTimerRC; /**< Receive Interrupt Delay Timer - RC. */
995 PTMTIMERRC pRADTimerRC; /**< Receive Absolute Delay Timer - RC. */
996 PTMTIMERRC pTIDTimerRC; /**< Transmit Interrupt Delay Timer - RC. */
997 PTMTIMERRC pTADTimerRC; /**< Transmit Absolute Delay Timer - RC. */
998 PTMTIMERRC pIntTimerRC; /**< Late Interrupt Timer - RC. */
999 PTMTIMERRC pLUTimerRC; /**< Link Up(/Restore) Timer - RC. */
1000 /** The scatter / gather buffer used for the current outgoing packet - RC. */
1001 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1002 RTRCPTR RCPtrAlignment;
1003
1004#if HC_ARCH_BITS == 32
1005 uint32_t Alignment1;
1006#endif
1007 PDMCRITSECT cs; /**< Critical section - what is it protecting? */
1008 PDMCRITSECT csRx; /**< RX Critical section. */
1009// PDMCRITSECT csTx; /**< TX Critical section. */
1010 /** Base address of memory-mapped registers. */
1011 RTGCPHYS addrMMReg;
1012 /** MAC address obtained from the configuration. */
1013 RTMAC macConfigured;
1014 /** Base port of I/O space region. */
1015 RTIOPORT addrIOPort;
1016 /** EMT: */
1017 PCIDEVICE pciDevice;
1018 /** EMT: Last time the interrupt was acknowledged. */
1019 uint64_t u64AckedAt;
1020 /** All: Used for eliminating spurious interrupts. */
1021 bool fIntRaised;
1022 /** EMT: false if the cable is disconnected by the GUI. */
1023 bool fCableConnected;
1024 /** EMT: */
1025 bool fR0Enabled;
1026 /** EMT: */
1027 bool fGCEnabled;
1028 /** EMT: Compute Ethernet CRC for RX packets. */
1029 bool fEthernetCRC;
1030
1031 bool Alignment2[3];
1032 uint32_t Alignment3;
1033
1034 /** All: Device register storage. */
1035 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1036 /** TX/RX: Status LED. */
1037 PDMLED led;
1038 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1039 uint32_t u32PktNo;
1040
1041 /** EMT: Offset of the register to be read via IO. */
1042 uint32_t uSelectedReg;
1043 /** EMT: Multicast Table Array. */
1044 uint32_t auMTA[128];
1045 /** EMT: Receive Address registers. */
1046 E1KRA aRecAddr;
1047 /** EMT: VLAN filter table array. */
1048 uint32_t auVFTA[128];
1049 /** EMT: Receive buffer size. */
1050 uint16_t u16RxBSize;
1051 /** EMT: Locked state -- no state alteration possible. */
1052 bool fLocked;
1053 /** EMT: */
1054 bool fDelayInts;
1055 /** All: */
1056 bool fIntMaskUsed;
1057
1058 /** N/A: */
1059 bool volatile fMaybeOutOfSpace;
1060 /** EMT: Gets signalled when more RX descriptors become available. */
1061 RTSEMEVENT hEventMoreRxDescAvail;
1062
1063 /** TX: Context used for TCP segmentation packets. */
1064 E1KTXCTX contextTSE;
1065 /** TX: Context used for ordinary packets. */
1066 E1KTXCTX contextNormal;
1067#ifdef E1K_WITH_TXD_CACHE
1068 /** EMT/TX: Fetched TX descriptors. */
1069 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1070 /** EMT/TX: Actual number of fetched TX descriptors. */
1071 uint8_t nTxDFetched;
1072 /** EMT/TX: Index in cache of TX descriptor being processed. */
1073 uint8_t iTxDCurrent;
1074 /** EMT/TX: Will this frame be sent as GSO. */
1075 bool fGSO;
1076 /** EMT/TX: Number of bytes in next packet. */
1077 uint32_t cbTxAlloc;
1078
1079#endif /* E1K_WITH_TXD_CACHE */
1080 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1081 * applicable to the current TSE mode. */
1082 PDMNETWORKGSO GsoCtx;
1083 /** Scratch space for holding the loopback / fallback scatter / gather
1084 * descriptor. */
1085 union
1086 {
1087 PDMSCATTERGATHER Sg;
1088 uint8_t padding[8 * sizeof(RTUINTPTR)];
1089 } uTxFallback;
1090 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1091 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1092 /** TX: Number of bytes assembled in TX packet buffer. */
1093 uint16_t u16TxPktLen;
1094 /** TX: IP checksum has to be inserted if true. */
1095 bool fIPcsum;
1096 /** TX: TCP/UDP checksum has to be inserted if true. */
1097 bool fTCPcsum;
1098 /** TX: VLAN tag has to be inserted if true. */
1099 bool fVTag;
1100 /** TX: TCI part of VLAN tag to be inserted. */
1101 uint16_t u16VTagTCI;
1102 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1103 uint32_t u32PayRemain;
1104 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1105 uint16_t u16HdrRemain;
1106 /** TX TSE fallback: Flags from template header. */
1107 uint16_t u16SavedFlags;
1108 /** TX TSE fallback: Partial checksum from template header. */
1109 uint32_t u32SavedCsum;
1110 /** ?: Emulated controller type. */
1111 E1KCHIP eChip;
1112
1113 /** EMT: EEPROM emulation */
1114 E1kEEPROM eeprom;
1115 /** EMT: Physical interface emulation. */
1116 PHY phy;
1117
1118#if 0
1119 /** Alignment padding. */
1120 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1121#endif
1122
1123 STAMCOUNTER StatReceiveBytes;
1124 STAMCOUNTER StatTransmitBytes;
1125#if defined(VBOX_WITH_STATISTICS)
1126 STAMPROFILEADV StatMMIOReadRZ;
1127 STAMPROFILEADV StatMMIOReadR3;
1128 STAMPROFILEADV StatMMIOWriteRZ;
1129 STAMPROFILEADV StatMMIOWriteR3;
1130 STAMPROFILEADV StatEEPROMRead;
1131 STAMPROFILEADV StatEEPROMWrite;
1132 STAMPROFILEADV StatIOReadRZ;
1133 STAMPROFILEADV StatIOReadR3;
1134 STAMPROFILEADV StatIOWriteRZ;
1135 STAMPROFILEADV StatIOWriteR3;
1136 STAMPROFILEADV StatLateIntTimer;
1137 STAMCOUNTER StatLateInts;
1138 STAMCOUNTER StatIntsRaised;
1139 STAMCOUNTER StatIntsPrevented;
1140 STAMPROFILEADV StatReceive;
1141 STAMPROFILEADV StatReceiveCRC;
1142 STAMPROFILEADV StatReceiveFilter;
1143 STAMPROFILEADV StatReceiveStore;
1144 STAMPROFILEADV StatTransmitRZ;
1145 STAMPROFILEADV StatTransmitR3;
1146 STAMPROFILE StatTransmitSendRZ;
1147 STAMPROFILE StatTransmitSendR3;
1148 STAMPROFILE StatRxOverflow;
1149 STAMCOUNTER StatRxOverflowWakeup;
1150 STAMCOUNTER StatTxDescCtxNormal;
1151 STAMCOUNTER StatTxDescCtxTSE;
1152 STAMCOUNTER StatTxDescLegacy;
1153 STAMCOUNTER StatTxDescData;
1154 STAMCOUNTER StatTxDescTSEData;
1155 STAMCOUNTER StatTxPathFallback;
1156 STAMCOUNTER StatTxPathGSO;
1157 STAMCOUNTER StatTxPathRegular;
1158 STAMCOUNTER StatPHYAccesses;
1159
1160#endif /* VBOX_WITH_STATISTICS */
1161
1162#ifdef E1K_INT_STATS
1163 /* Internal stats */
1164 uint32_t uStatInt;
1165 uint32_t uStatIntTry;
1166 int32_t uStatIntLower;
1167 uint32_t uStatIntDly;
1168 int32_t iStatIntLost;
1169 int32_t iStatIntLostOne;
1170 uint32_t uStatDisDly;
1171 uint32_t uStatIntSkip;
1172 uint32_t uStatIntLate;
1173 uint32_t uStatIntMasked;
1174 uint32_t uStatIntEarly;
1175 uint32_t uStatIntRx;
1176 uint32_t uStatIntTx;
1177 uint32_t uStatIntICS;
1178 uint32_t uStatIntRDTR;
1179 uint32_t uStatIntRXDMT0;
1180 uint32_t uStatIntTXQE;
1181 uint32_t uStatTxNoRS;
1182 uint32_t uStatTxIDE;
1183 uint32_t uStatTAD;
1184 uint32_t uStatTID;
1185 uint32_t uStatRAD;
1186 uint32_t uStatRID;
1187 uint32_t uStatRxFrm;
1188 uint32_t uStatTxFrm;
1189 uint32_t uStatDescCtx;
1190 uint32_t uStatDescDat;
1191 uint32_t uStatDescLeg;
1192#endif /* E1K_INT_STATS */
1193};
1194typedef struct E1kState_st E1KSTATE;
1195
1196#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1197
1198/* Forward declarations ******************************************************/
1199static int e1kXmitPending(E1KSTATE *pState, bool fOnWorkerThread);
1200
1201static int e1kRegReadUnimplemented (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1202static int e1kRegWriteUnimplemented(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1203static int e1kRegReadAutoClear (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1204static int e1kRegReadDefault (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1205static int e1kRegWriteDefault (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1206#if 0 /* unused */
1207static int e1kRegReadCTRL (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1208#endif
1209static int e1kRegWriteCTRL (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1210static int e1kRegReadEECD (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1211static int e1kRegWriteEECD (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1212static int e1kRegWriteEERD (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1213static int e1kRegWriteMDIC (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1214static int e1kRegReadICR (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1215static int e1kRegWriteICR (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1216static int e1kRegWriteICS (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1217static int e1kRegWriteIMS (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1218static int e1kRegWriteIMC (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1219static int e1kRegWriteRCTL (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1220static int e1kRegWritePBA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1221static int e1kRegWriteRDT (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1222static int e1kRegWriteRDTR (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1223static int e1kRegWriteTDT (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1224static int e1kRegReadMTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1225static int e1kRegWriteMTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1226static int e1kRegReadRA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1227static int e1kRegWriteRA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1228static int e1kRegReadVFTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1229static int e1kRegWriteVFTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1230
1231/**
1232 * Register map table.
1233 *
1234 * Override pfnRead and pfnWrite to get register-specific behavior.
1235 */
1236const static struct E1kRegMap_st
1237{
1238 /** Register offset in the register space. */
1239 uint32_t offset;
1240 /** Size in bytes. Registers of size > 4 are in fact tables. */
1241 uint32_t size;
1242 /** Readable bits. */
1243 uint32_t readable;
1244 /** Writable bits. */
1245 uint32_t writable;
1246 /** Read callback. */
1247 int (*pfnRead)(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1248 /** Write callback. */
1249 int (*pfnWrite)(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1250 /** Abbreviated name. */
1251 const char *abbrev;
1252 /** Full name. */
1253 const char *name;
1254} s_e1kRegMap[E1K_NUM_OF_REGS] =
1255{
1256 /* offset size read mask write mask read callback write callback abbrev full name */
1257 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1258 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1259 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1260 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1261 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1262 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1263 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1264 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1265 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1266 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1267 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1268 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1269 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1270 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1271 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1272 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1273 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1274 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1275 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1276 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1277 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1278 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1279 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1280 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1281 { 0x00e00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LEDCTL" , "LED Control" },
1282 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1283 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1284 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1285 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1286 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1287 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1288 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1289 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1290 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1291 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1292 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1293 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1294 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1295 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1296 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1297 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1298 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1299 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1300 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1301 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1302 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1303 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1304 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1305 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1306 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1307 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1308 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1309 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1310 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1311 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1312 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1313 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1314 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1315 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1316 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1317 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1318 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1319 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1320 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1321 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1322 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1323 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1324 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1325 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1326 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1327 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1328 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1329 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1330 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1331 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1332 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1333 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1334 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1335 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1336 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1337 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1338 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1339 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1340 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1341 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1342 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1343 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1344 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1345 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1346 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1347 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1348 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1349 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1350 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1351 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1352 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1353 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1354 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1355 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1356 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1357 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1358 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1359 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1360 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1361 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1362 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1363 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1364 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1365 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1366 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1367 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1368 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1369 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1370 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1371 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1372 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1373 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1374 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1375 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1376 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1377 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1378 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1379 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1380 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1381 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1382 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1383 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1384 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1385 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1386 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1387 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1388 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1389 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n) (82542)" },
1390 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n) (82542)" },
1391 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n) (82542)" }
1392};
1393
1394#ifdef DEBUG
1395
1396/**
1397 * Convert U32 value to hex string. Masked bytes are replaced with dots.
1398 *
1399 * @remarks The mask has byte (not bit) granularity (e.g. 000000FF).
1400 *
1401 * @returns The buffer.
1402 *
1403 * @param u32 The word to convert into string.
1404 * @param mask Selects which bytes to convert.
1405 * @param buf Where to put the result.
1406 */
1407static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1408{
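    /*
     * Each hex digit is produced from one nibble; digits above 9 map to 'A'..'F'
     * by adding to '7' (since '7' + 10 == 'A' in ASCII). For example,
     * u32=0x12AB56CD with mask=0xFFFF00FF comes out as "12AB..CD".
     */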
1409 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1410 {
1411 if (mask & 0xF)
1412 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1413 else
1414 *ptr = '.';
1415 }
1416 buf[8] = 0;
1417 return buf;
1418}
1419
1420/**
1421 * Returns timer name for debug purposes.
1422 *
1423 * @returns The timer name.
1424 *
1425 * @param pState The device state structure.
1426 * @param pTimer The timer to get the name for.
1427 */
1428DECLINLINE(const char *) e1kGetTimerName(E1KSTATE *pState, PTMTIMER pTimer)
1429{
1430 if (pTimer == pState->CTX_SUFF(pTIDTimer))
1431 return "TID";
1432 if (pTimer == pState->CTX_SUFF(pTADTimer))
1433 return "TAD";
1434 if (pTimer == pState->CTX_SUFF(pRIDTimer))
1435 return "RID";
1436 if (pTimer == pState->CTX_SUFF(pRADTimer))
1437 return "RAD";
1438 if (pTimer == pState->CTX_SUFF(pIntTimer))
1439 return "Int";
1440 return "unknown";
1441}
1442
1443#endif /* DEBUG */
1444
1445/**
1446 * Arm a timer.
1447 *
1448 * @param pState Pointer to the device state structure.
1449 * @param pTimer Pointer to the timer.
1450 * @param uExpireIn Expiration interval in microseconds.
1451 */
1452DECLINLINE(void) e1kArmTimer(E1KSTATE *pState, PTMTIMER pTimer, uint32_t uExpireIn)
1453{
1454 if (pState->fLocked)
1455 return;
1456
1457 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1458 INSTANCE(pState), e1kGetTimerName(pState, pTimer), uExpireIn));
1459 TMTimerSet(pTimer, TMTimerFromMicro(pTimer, uExpireIn) +
1460 TMTimerGet(pTimer));
1461}
1462
1463/**
1464 * Cancel a timer.
1465 *
1466 * @param pState Pointer to the device state structure.
1467 * @param pTimer Pointer to the timer.
1468 */
1469DECLINLINE(void) e1kCancelTimer(E1KSTATE *pState, PTMTIMER pTimer)
1470{
1471 E1kLog2(("%s Stopping %s timer...\n",
1472 INSTANCE(pState), e1kGetTimerName(pState, pTimer)));
1473 int rc = TMTimerStop(pTimer);
1474 if (RT_FAILURE(rc))
1475 {
1476 E1kLog2(("%s e1kCancelTimer: TMTimerStop() failed with %Rrc\n",
1477 INSTANCE(pState), rc));
1478 }
1479}
1480
1481#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
1482#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)
1483
1484#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
1485#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
1486
1487#define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1488#define e1kCsTxLeave(ps) do { } while (0)
1489//# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
1490//# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
1491
1492#ifdef IN_RING3
1493
1494/**
1495 * Wakeup the RX thread.
1496 */
1497static void e1kWakeupReceive(PPDMDEVINS pDevIns)
1498{
1499 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
1500 if ( pState->fMaybeOutOfSpace
1501 && pState->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
1502 {
1503 STAM_COUNTER_INC(&pState->StatRxOverflowWakeup);
1504 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", INSTANCE(pState)));
1505 RTSemEventSignal(pState->hEventMoreRxDescAvail);
1506 }
1507}
1508
1509/**
1510 * Hardware reset. Revert all registers to initial values.
1511 *
1512 * @param pState The device state structure.
1513 */
1514static void e1kHardReset(E1KSTATE *pState)
1515{
1516 E1kLog(("%s Hard reset triggered\n", INSTANCE(pState)));
1517 memset(pState->auRegs, 0, sizeof(pState->auRegs));
1518 memset(pState->aRecAddr.au32, 0, sizeof(pState->aRecAddr.au32));
1519#ifdef E1K_INIT_RA0
1520 memcpy(pState->aRecAddr.au32, pState->macConfigured.au8,
1521 sizeof(pState->macConfigured.au8));
1522 pState->aRecAddr.array[0].ctl |= RA_CTL_AV;
1523#endif /* E1K_INIT_RA0 */
1524 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1525 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1526 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1527 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1528 Assert(GET_BITS(RCTL, BSIZE) == 0);
1529 pState->u16RxBSize = 2048;
1530
1531 /* Reset promiscuous mode */
1532 if (pState->pDrvR3)
1533 pState->pDrvR3->pfnSetPromiscuousMode(pState->pDrvR3, false);
1534}
1535
1536#endif /* IN_RING3 */
1537
1538/**
1539 * Compute Internet checksum.
1540 *
1541 * @remarks Refer to http://www.netfor2.com/checksum.html for a short intro.
1542 *
1543 * @param pvBuf The buffer containing the data to checksum.
1544 * @param cb The number of bytes in the buffer.
1547 *
1548 * @return The 1's complement of the 1's complement sum.
1549 *
1550 * @thread E1000_TX
1551 */
1552static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1553{
1554 uint32_t csum = 0;
1555 uint16_t *pu16 = (uint16_t *)pvBuf;
1556
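    /*
     * Standard Internet checksum (cf. RFC 1071): sum the data as 16-bit words,
     * fold any carries above bit 15 back into the low word, then complement.
     * E.g. 0xFFFF + 0x0001 = 0x10000 folds to 0x0001 and complements to 0xFFFE.
     */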
1557 while (cb > 1)
1558 {
1559 csum += *pu16++;
1560 cb -= 2;
1561 }
1562 if (cb)
1563 csum += *(uint8_t*)pu16;
1564 while (csum >> 16)
1565 csum = (csum >> 16) + (csum & 0xFFFF);
1566 return ~csum;
1567}
1568
1569/**
1570 * Dump a packet to debug log.
1571 *
1572 * @param pState The device state structure.
1573 * @param cpPacket The packet.
1574 * @param cb The size of the packet.
1575 * @param cszText A string denoting direction of packet transfer.
1576 * @thread E1000_TX
1577 */
1578DECLINLINE(void) e1kPacketDump(E1KSTATE* pState, const uint8_t *cpPacket, size_t cb, const char *cszText)
1579{
1580#ifdef DEBUG
1581 if (RT_LIKELY(e1kCsEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
1582 {
1583 E1kLog(("%s --- %s packet #%d: ---\n",
1584 INSTANCE(pState), cszText, ++pState->u32PktNo));
1585 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1586 e1kCsLeave(pState);
1587 }
1588#else
1589 if (RT_LIKELY(e1kCsEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
1590 {
1591 E1kLogRel(("E1000: %s packet #%d, seq=%x ack=%x\n", cszText, pState->u32PktNo++, ntohl(*(uint32_t*)(cpPacket+0x26)), ntohl(*(uint32_t*)(cpPacket+0x2A))));
1592 e1kCsLeave(pState);
1593 }
1594#endif
1595}
1596
1597/**
1598 * Determine the type of transmit descriptor.
1599 *
1600 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1601 *
1602 * @param pDesc Pointer to descriptor union.
1603 * @thread E1000_TX
1604 */
1605DECLINLINE(int) e1kGetDescType(E1KTXDESC* pDesc)
1606{
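    /*
     * Legacy descriptors have the DEXT bit clear; when it is set, the DTYP field
     * of the extended descriptor tells context descriptors (E1K_DTYP_CONTEXT)
     * from data descriptors (E1K_DTYP_DATA).
     */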
1607 if (pDesc->legacy.cmd.fDEXT)
1608 return pDesc->context.dw2.u4DTYP;
1609 return E1K_DTYP_LEGACY;
1610}
1611
1612/**
1613 * Dump receive descriptor to debug log.
1614 *
1615 * @param pState The device state structure.
1616 * @param pDesc Pointer to the descriptor.
1617 * @thread E1000_RX
1618 */
1619static void e1kPrintRDesc(E1KSTATE* pState, E1KRXDESC* pDesc)
1620{
1621 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", INSTANCE(pState), pDesc->u16Length));
1622 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1623 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1624 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1625 pDesc->status.fPIF ? "PIF" : "pif",
1626 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1627 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1628 pDesc->status.fVP ? "VP" : "vp",
1629 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1630 pDesc->status.fEOP ? "EOP" : "eop",
1631 pDesc->status.fDD ? "DD" : "dd",
1632 pDesc->status.fRXE ? "RXE" : "rxe",
1633 pDesc->status.fIPE ? "IPE" : "ipe",
1634 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1635 pDesc->status.fCE ? "CE" : "ce",
1636 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1637 E1K_SPEC_VLAN(pDesc->status.u16Special),
1638 E1K_SPEC_PRI(pDesc->status.u16Special)));
1639}
1640
1641/**
1642 * Dump transmit descriptor to debug log.
1643 *
1644 * @param pState The device state structure.
1645 * @param pDesc Pointer to descriptor union.
1646 * @param cszDir A string denoting direction of descriptor transfer
1647 * @thread E1000_TX
1648 */
1649static void e1kPrintTDesc(E1KSTATE* pState, E1KTXDESC* pDesc, const char* cszDir)
1650{
1651 switch (e1kGetDescType(pDesc))
1652 {
1653 case E1K_DTYP_CONTEXT:
1654 E1kLog2(("%s %s Context Transmit Descriptor %s\n",
1655 INSTANCE(pState), cszDir, cszDir));
1656 E1kLog2((" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
1657 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
1658 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
1659 E1kLog2((" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
1660 pDesc->context.dw2.fIDE ? " IDE":"",
1661 pDesc->context.dw2.fRS ? " RS" :"",
1662 pDesc->context.dw2.fTSE ? " TSE":"",
1663 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
1664 pDesc->context.dw2.fTCP ? "TCP":"UDP",
1665 pDesc->context.dw2.u20PAYLEN,
1666 pDesc->context.dw3.u8HDRLEN,
1667 pDesc->context.dw3.u16MSS,
1668 pDesc->context.dw3.fDD?"DD":""));
1669 break;
1670 case E1K_DTYP_DATA:
1671 E1kLog2(("%s %s Data Transmit Descriptor (%d bytes) %s\n",
1672 INSTANCE(pState), cszDir, pDesc->data.cmd.u20DTALEN, cszDir));
1673 E1kLog2((" Address=%16LX DTALEN=%05X\n",
1674 pDesc->data.u64BufAddr,
1675 pDesc->data.cmd.u20DTALEN));
1676 E1kLog2((" DCMD:%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
1677 pDesc->data.cmd.fIDE ? " IDE" :"",
1678 pDesc->data.cmd.fVLE ? " VLE" :"",
1679 pDesc->data.cmd.fRS ? " RS" :"",
1680 pDesc->data.cmd.fTSE ? " TSE" :"",
1681 pDesc->data.cmd.fIFCS? " IFCS":"",
1682 pDesc->data.cmd.fEOP ? " EOP" :"",
1683 pDesc->data.dw3.fDD ? " DD" :"",
1684 pDesc->data.dw3.fEC ? " EC" :"",
1685 pDesc->data.dw3.fLC ? " LC" :"",
1686 pDesc->data.dw3.fTXSM? " TXSM":"",
1687 pDesc->data.dw3.fIXSM? " IXSM":"",
1688 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
1689 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
1690 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
1691 break;
1692 case E1K_DTYP_LEGACY:
1693 E1kLog2(("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
1694 INSTANCE(pState), cszDir, pDesc->legacy.cmd.u16Length, cszDir));
1695 E1kLog2((" Address=%16LX DTALEN=%05X\n",
1696 pDesc->data.u64BufAddr,
1697 pDesc->legacy.cmd.u16Length));
1698 E1kLog2((" CMD:%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
1699 pDesc->legacy.cmd.fIDE ? " IDE" :"",
1700 pDesc->legacy.cmd.fVLE ? " VLE" :"",
1701 pDesc->legacy.cmd.fRS ? " RS" :"",
1702 pDesc->legacy.cmd.fIC ? " IC" :"",
1703 pDesc->legacy.cmd.fIFCS? " IFCS":"",
1704 pDesc->legacy.cmd.fEOP ? " EOP" :"",
1705 pDesc->legacy.dw3.fDD ? " DD" :"",
1706 pDesc->legacy.dw3.fEC ? " EC" :"",
1707 pDesc->legacy.dw3.fLC ? " LC" :"",
1708 pDesc->legacy.cmd.u8CSO,
1709 pDesc->legacy.dw3.u8CSS,
1710 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
1711 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
1712 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
1713 break;
1714 default:
1715 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
1716 INSTANCE(pState), cszDir, cszDir));
1717 break;
1718 }
1719}
1720
1721/**
1722 * Raise interrupt if not masked.
1723 *
1724 * @param pState The device state structure.
1725 */
1726static int e1kRaiseInterrupt(E1KSTATE *pState, int rcBusy, uint32_t u32IntCause = 0)
1727{
1728 int rc = e1kCsEnter(pState, rcBusy);
1729 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1730 return rc;
1731
1732 E1K_INC_ISTAT_CNT(pState->uStatIntTry);
1733 ICR |= u32IntCause;
1734 if (ICR & IMS)
1735 {
1736#if 0
1737 if (pState->fDelayInts)
1738 {
1739 E1K_INC_ISTAT_CNT(pState->uStatIntDly);
1740 pState->iStatIntLostOne = 1;
1741 E1kLog2(("%s e1kRaiseInterrupt: Delayed. ICR=%08x\n",
1742 INSTANCE(pState), ICR));
1743#define E1K_LOST_IRQ_THRSLD 20
1744//#define E1K_LOST_IRQ_THRSLD 200000000
1745 if (pState->iStatIntLost >= E1K_LOST_IRQ_THRSLD)
1746 {
1747 E1kLog2(("%s WARNING! Disabling delayed interrupt logic: delayed=%d, delivered=%d\n",
1748 INSTANCE(pState), pState->uStatIntDly, pState->uStatIntLate));
1749 pState->fIntMaskUsed = false;
1750 pState->uStatDisDly++;
1751 }
1752 }
1753 else
1754#endif
1755 if (pState->fIntRaised)
1756 {
1757 E1K_INC_ISTAT_CNT(pState->uStatIntSkip);
1758 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
1759 INSTANCE(pState), ICR & IMS));
1760 }
1761 else
1762 {
1763#ifdef E1K_ITR_ENABLED
1764 uint64_t tstamp = TMTimerGet(pState->CTX_SUFF(pIntTimer));
1765 /* interrupts/sec = 1 / (256 * 1E-9 * ITR), i.e. ITR counts in units of 256 ns */
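            /* E.g. ITR=3125 enforces a 3125 * 256 ns = 800 us minimum interval,
             * i.e. at most 1250 interrupts per second. */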
1766 E1kLog2(("%s e1kRaiseInterrupt: tstamp - pState->u64AckedAt = %d, ITR * 256 = %d\n",
1767 INSTANCE(pState), (uint32_t)(tstamp - pState->u64AckedAt), ITR * 256));
1768 if (!!ITR && pState->fIntMaskUsed && tstamp - pState->u64AckedAt < ITR * 256)
1769 {
1770 E1K_INC_ISTAT_CNT(pState->uStatIntEarly);
1771 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
1772 INSTANCE(pState), (uint32_t)(tstamp - pState->u64AckedAt), ITR * 256));
1773 }
1774 else
1775#endif
1776 {
1777
1778 /* Since we are delivering the interrupt now
1779 * there is no need to do it later -- stop the timer.
1780 */
1781 TMTimerStop(pState->CTX_SUFF(pIntTimer));
1782 E1K_INC_ISTAT_CNT(pState->uStatInt);
1783 STAM_COUNTER_INC(&pState->StatIntsRaised);
1784 /* Got at least one unmasked interrupt cause */
1785 pState->fIntRaised = true;
1786 /* Raise(1) INTA(0) */
1787 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
1788 PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 1);
1789 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
1790 INSTANCE(pState), ICR & IMS));
1791 }
1792 }
1793 }
1794 else
1795 {
1796 E1K_INC_ISTAT_CNT(pState->uStatIntMasked);
1797 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
1798 INSTANCE(pState), ICR, IMS));
1799 }
1800 e1kCsLeave(pState);
1801 return VINF_SUCCESS;
1802}
1803
1804/**
1805 * Compute the physical address of the descriptor.
1806 *
1807 * @returns the physical address of the descriptor.
1808 *
1809 * @param baseHigh High-order 32 bits of descriptor table address.
1810 * @param baseLow Low-order 32 bits of descriptor table address.
1811 * @param idxDesc The descriptor index in the table.
1812 */
1813DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
1814{
1815 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
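    /* Descriptors are 16 bytes each, so e.g. baseHigh=0, baseLow=0x10000 and
     * idxDesc=2 resolve to guest-physical address 0x10020. */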
1816 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
1817}
1818
1819/**
1820 * Advance the head pointer of the receive descriptor queue.
1821 *
1822 * @remarks RDH always points to the next available RX descriptor.
1823 *
1824 * @param pState The device state structure.
1825 */
1826DECLINLINE(void) e1kAdvanceRDH(E1KSTATE *pState)
1827{
1828 //e1kCsEnter(pState, RT_SRC_POS);
1829 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
1830 RDH = 0;
1831 /*
1832 * Compute current receive queue length and fire RXDMT0 interrupt
1833 * if we are low on receive buffers
1834 */
1835 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
1836 /*
1837 * The minimum threshold is controlled by RDMTS bits of RCTL:
1838 * 00 = 1/2 of RDLEN
1839 * 01 = 1/4 of RDLEN
1840 * 10 = 1/8 of RDLEN
1841 * 11 = reserved
1842 */
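    /* E.g. a 256-descriptor ring (RDLEN=4096) with RDMTS=01b yields a threshold
     * of 256 / (2 << 1) = 64 remaining descriptors. */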
1843 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
1844 if (uRQueueLen <= uMinRQThreshold)
1845 {
1846 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
1847 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
1848 INSTANCE(pState), RDH, RDT, uRQueueLen, uMinRQThreshold));
1849 E1K_INC_ISTAT_CNT(pState->uStatIntRXDMT0);
1850 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXDMT0);
1851 }
1852 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
1853 INSTANCE(pState), RDH, RDT, uRQueueLen));
1854 //e1kCsLeave(pState);
1855}
1856
1857/**
1858 * Store a fragment of received packet that fits into the next available RX
1859 * buffer.
1860 *
1861 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
1862 *
1863 * @param pState The device state structure.
1864 * @param pDesc The next available RX descriptor.
1865 * @param pvBuf The fragment.
1866 * @param cb The size of the fragment.
1867 */
1868static DECLCALLBACK(void) e1kStoreRxFragment(E1KSTATE *pState, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
1869{
1870 STAM_PROFILE_ADV_START(&pState->StatReceiveStore, a);
1871 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pState->szInstance, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
1872 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
1873 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
1874 /* Write back the descriptor */
1875 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
1876 e1kPrintRDesc(pState, pDesc);
1877 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
1878 /* Advance head */
1879 e1kAdvanceRDH(pState);
1880 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", INSTANCE(pState), pDesc->fEOP, RDTR, RADV));
1881 if (pDesc->status.fEOP)
1882 {
1883 /* Complete packet has been stored -- it is time to let the guest know. */
1884#ifdef E1K_USE_RX_TIMERS
1885 if (RDTR)
1886 {
1887 /* Arm the timer to fire in RDTR usec (discard .024) */
1888 e1kArmTimer(pState, pState->CTX_SUFF(pRIDTimer), RDTR);
1889 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
1890 if (RADV != 0 && !TMTimerIsActive(pState->CTX_SUFF(pRADTimer)))
1891 e1kArmTimer(pState, pState->CTX_SUFF(pRADTimer), RADV);
1892 }
1893 else
1894 {
1895#endif
1896 /* 0 delay means immediate interrupt */
1897 E1K_INC_ISTAT_CNT(pState->uStatIntRx);
1898 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXT0);
1899#ifdef E1K_USE_RX_TIMERS
1900 }
1901#endif
1902 }
1903 STAM_PROFILE_ADV_STOP(&pState->StatReceiveStore, a);
1904}
1905
1906/**
1907 * Returns true if it is a broadcast packet.
1908 *
1909 * @returns true if destination address indicates broadcast.
1910 * @param pvBuf The ethernet packet.
1911 */
1912DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
1913{
1914 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
1915 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
1916}
1917
1918/**
1919 * Returns true if it is a multicast packet.
1920 *
1921 * @remarks returns true for broadcast packets as well.
1922 * @returns true if destination address indicates multicast.
1923 * @param pvBuf The ethernet packet.
1924 */
1925DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
1926{
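    /* The least significant bit of the first destination-address octet is the
     * Ethernet I/G (group) bit; it is set for multicast and broadcast frames. */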
1927 return (*(char*)pvBuf) & 1;
1928}
1929
1930/**
1931 * Set IXSM, IPCS and TCPCS flags according to the packet type.
1932 *
1933 * @remarks We emulate checksum offloading for major packet types only.
1934 *
1935 * @returns VBox status code.
1936 * @param pState The device state structure.
1937 * @param pFrame The available data.
1938 * @param cb Number of bytes available in the buffer.
1939 * @param pStatus Pointer to the descriptor status fields to fill in.
1940 */
1941static int e1kRxChecksumOffload(E1KSTATE* pState, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
1942{
1943 /** @todo
1944 * It is not safe to bypass checksum verification for packets coming
1945 * from the real wire. We are currently unable to tell where packets are
1946 * coming from, so we tell the driver to ignore our checksum flags
1947 * and do the verification in software.
1948 */
1949#if 0
1950 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
1951
1952 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", INSTANCE(pState), uEtherType));
1953
1954 switch (uEtherType)
1955 {
1956 case 0x800: /* IPv4 */
1957 {
1958 pStatus->fIXSM = false;
1959 pStatus->fIPCS = true;
1960 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
1961 /* TCP/UDP checksum offloading works with TCP and UDP only */
1962 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
1963 break;
1964 }
1965 case 0x86DD: /* IPv6 */
1966 pStatus->fIXSM = false;
1967 pStatus->fIPCS = false;
1968 pStatus->fTCPCS = true;
1969 break;
1970 default: /* ARP, VLAN, etc. */
1971 pStatus->fIXSM = true;
1972 break;
1973 }
1974#else
1975 pStatus->fIXSM = true;
1976#endif
1977 return VINF_SUCCESS;
1978}
1979
1980/**
1981 * Pad and store received packet.
1982 *
1983 * @remarks Make sure that the packet appears to the upper layer as one coming
1984 * from a real Ethernet wire: pad it and insert the FCS.
1985 *
1986 * @returns VBox status code.
1987 * @param pState The device state structure.
1988 * @param pvBuf The available data.
1989 * @param cb Number of bytes available in the buffer.
1990 * @param status Bit fields containing status info.
1991 */
1992static int e1kHandleRxPacket(E1KSTATE* pState, const void *pvBuf, size_t cb, E1KRXDST status)
1993{
1994#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
1995 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
1996 uint8_t *ptr = rxPacket;
1997
1998 int rc = e1kCsRxEnter(pState, VERR_SEM_BUSY);
1999 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2000 return rc;
2001
2002 if (cb > 70) /* unqualified guess */
2003 pState->led.Asserted.s.fReading = pState->led.Actual.s.fReading = 1;
2004
2005 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2006 Assert(cb > 16);
2007 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2008 E1kLog3(("%s Max RX packet size is %u\n", INSTANCE(pState), cbMax));
2009 if (status.fVP)
2010 {
2011 /* VLAN packet -- strip VLAN tag in VLAN mode */
2012 if ((CTRL & CTRL_VME) && cb > 16)
2013 {
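            /* 802.1Q layout: bytes 12-13 carry the 0x8100 TPID and bytes 14-15 the
             * TCI, so u16Ptr[7] (offset 14) holds the tag we extract before the
             * whole 4-byte VLAN header is removed from the frame. */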
2014 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2015 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2016 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2017 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2018 cb -= 4;
2019 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2020 INSTANCE(pState), status.u16Special, cb));
2021 }
2022 else
2023 status.fVP = false; /* Set VP only if we stripped the tag */
2024 }
2025 else
2026 memcpy(rxPacket, pvBuf, cb);
2027 /* Pad short packets */
2028 if (cb < 60)
2029 {
2030 memset(rxPacket + cb, 0, 60 - cb);
2031 cb = 60;
2032 }
2033 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2034 {
2035 STAM_PROFILE_ADV_START(&pState->StatReceiveCRC, a);
2036 /*
2037 * Add FCS if CRC stripping is not enabled. Since the value of CRC
2038 * is ignored by most of drivers we may as well save us the trouble
2039 * of calculating it (see EthernetCRC CFGM parameter).
2040 */
2041 if (pState->fEthernetCRC)
2042 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2043 cb += sizeof(uint32_t);
2044 STAM_PROFILE_ADV_STOP(&pState->StatReceiveCRC, a);
2045 E1kLog3(("%s Added FCS (cb=%u)\n", INSTANCE(pState), cb));
2046 }
2047 /* Compute checksum of complete packet */
2048 uint16_t checksum = e1kCSum16(rxPacket + GET_BITS(RXCSUM, PCSS), cb);
2049 e1kRxChecksumOffload(pState, rxPacket, cb, &status);
2050
2051 /* Update stats */
2052 E1K_INC_CNT32(GPRC);
2053 if (e1kIsBroadcast(pvBuf))
2054 E1K_INC_CNT32(BPRC);
2055 else if (e1kIsMulticast(pvBuf))
2056 E1K_INC_CNT32(MPRC);
2057 /* Update octet receive counter */
2058 E1K_ADD_CNT64(GORCL, GORCH, cb);
2059 STAM_REL_COUNTER_ADD(&pState->StatReceiveBytes, cb);
2060 if (cb == 64)
2061 E1K_INC_CNT32(PRC64);
2062 else if (cb < 128)
2063 E1K_INC_CNT32(PRC127);
2064 else if (cb < 256)
2065 E1K_INC_CNT32(PRC255);
2066 else if (cb < 512)
2067 E1K_INC_CNT32(PRC511);
2068 else if (cb < 1024)
2069 E1K_INC_CNT32(PRC1023);
2070 else
2071 E1K_INC_CNT32(PRC1522);
2072
2073 E1K_INC_ISTAT_CNT(pState->uStatRxFrm);
2074
2075 if (RDH == RDT)
2076 {
2077 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2078 INSTANCE(pState)));
2079 }
2080 /* Store the packet to receive buffers */
2081 while (RDH != RDT)
2082 {
2083 /* Load the descriptor pointed by head */
2084 E1KRXDESC desc;
2085 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
2086 &desc, sizeof(desc));
2087 if (desc.u64BufAddr)
2088 {
2089 /* Update descriptor */
2090 desc.status = status;
2091 desc.u16Checksum = checksum;
2092 desc.status.fDD = true;
2093
2094 /*
2095 * We need to leave Rx critical section here or we risk deadlocking
2096 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2097 * page or has an access handler associated with it.
2098 * Note that it is safe to leave the critical section here since e1kRegWriteRDT()
2099 * modifies RDT only.
2100 */
2101 if (cb > pState->u16RxBSize)
2102 {
2103 desc.status.fEOP = false;
2104 e1kCsRxLeave(pState);
2105 e1kStoreRxFragment(pState, &desc, ptr, pState->u16RxBSize);
2106 rc = e1kCsRxEnter(pState, VERR_SEM_BUSY);
2107 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2108 return rc;
2109 ptr += pState->u16RxBSize;
2110 cb -= pState->u16RxBSize;
2111 }
2112 else
2113 {
2114 desc.status.fEOP = true;
2115 e1kCsRxLeave(pState);
2116 e1kStoreRxFragment(pState, &desc, ptr, cb);
2117 pState->led.Actual.s.fReading = 0;
2118 return VINF_SUCCESS;
2119 }
2120 /* Note: RDH is advanced by e1kStoreRxFragment! */
2121 }
2122 else
2123 {
2124 desc.status.fDD = true;
2125 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns),
2126 e1kDescAddr(RDBAH, RDBAL, RDH),
2127 &desc, sizeof(desc));
2128 e1kAdvanceRDH(pState);
2129 }
2130 }
2131
2132 if (cb > 0)
2133 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", INSTANCE(pState), cb));
2134
2135 pState->led.Actual.s.fReading = 0;
2136
2137 e1kCsRxLeave(pState);
2138
2139 return VINF_SUCCESS;
2140#else
2141 return VERR_INTERNAL_ERROR_2;
2142#endif
2143}
2144
2145
2146#if 0 /* unused */
2147/**
2148 * Read handler for Device Control register.
2149 *
2150 * Reflects the state of the MDIO pin as reported by the PHY.
2151 *
2152 * @returns VBox status code.
2153 *
2154 * @param pState The device state structure.
2155 * @param offset Register offset in memory-mapped frame.
2156 * @param index Register index in register array.
2157 * @param mask Used to implement partial reads (8 and 16-bit).
2158 */
2159static int e1kRegReadCTRL(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2160{
2161 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2162 INSTANCE(pState), (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2163 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2164 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2165 {
2166 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2167 if (Phy::readMDIO(&pState->phy))
2168 *pu32Value = CTRL | CTRL_MDIO;
2169 else
2170 *pu32Value = CTRL & ~CTRL_MDIO;
2171 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2172 INSTANCE(pState), !!(*pu32Value & CTRL_MDIO)));
2173 }
2174 else
2175 {
2176 /* MDIO pin is used for output, ignore it */
2177 *pu32Value = CTRL;
2178 }
2179 return VINF_SUCCESS;
2180}
2181#endif /* unused */
2182
2183/**
2184 * Write handler for Device Control register.
2185 *
2186 * Handles reset.
2187 *
2188 * @param pState The device state structure.
2189 * @param offset Register offset in memory-mapped frame.
2190 * @param index Register index in register array.
2191 * @param value The value to store.
2192 * @param mask Used to implement partial writes (8 and 16-bit).
2193 * @thread EMT
2194 */
2195static int e1kRegWriteCTRL(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2196{
2197 int rc = VINF_SUCCESS;
2198
2199 if (value & CTRL_RESET)
2200 { /* RST */
2201#ifndef IN_RING3
2202 return VINF_IOM_R3_IOPORT_WRITE;
2203#else
2204 e1kHardReset(pState);
2205#endif
2206 }
2207 else
2208 {
2209 if ( (value & CTRL_SLU)
2210 && pState->fCableConnected
2211 && !(STATUS & STATUS_LU))
2212 {
2213 /* The driver indicates that we should bring up the link */
2214 /* Do so in 5 seconds. */
2215 e1kArmTimer(pState, pState->CTX_SUFF(pLUTimer), 5000000);
2216 /*
2217 * Change the status (but not PHY status) anyway as Windows expects
2218 * it for 82543GC.
2219 */
2220 STATUS |= STATUS_LU;
2221 }
2222 if (value & CTRL_VME)
2223 {
2224 E1kLog(("%s VLAN Mode Enabled\n", INSTANCE(pState)));
2225 }
2226 E1kLog(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2227 INSTANCE(pState), (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2228 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2229 if (value & CTRL_MDC)
2230 {
2231 if (value & CTRL_MDIO_DIR)
2232 {
2233 E1kLog(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", INSTANCE(pState), !!(value & CTRL_MDIO)));
2234 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2235 Phy::writeMDIO(&pState->phy, !!(value & CTRL_MDIO));
2236 }
2237 else
2238 {
2239 if (Phy::readMDIO(&pState->phy))
2240 value |= CTRL_MDIO;
2241 else
2242 value &= ~CTRL_MDIO;
2243 E1kLog(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n",
2244 INSTANCE(pState), !!(value & CTRL_MDIO)));
2245 }
2246 }
2247 rc = e1kRegWriteDefault(pState, offset, index, value);
2248 }
2249
2250 return rc;
2251}
2252
2253/**
2254 * Write handler for EEPROM/Flash Control/Data register.
2255 *
2256 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2257 *
2258 * @param pState The device state structure.
2259 * @param offset Register offset in memory-mapped frame.
2260 * @param index Register index in register array.
2261 * @param value The value to store.
2262 * @param mask Used to implement partial writes (8 and 16-bit).
2263 * @thread EMT
2264 */
2265static int e1kRegWriteEECD(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2266{
2267#ifdef IN_RING3
2268 /* So far we are concerned with lower byte only */
2269 if ((EECD & EECD_EE_GNT) || pState->eChip == E1K_CHIP_82543GC)
2270 {
2271 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2272 /* Note: 82543GC does not need to request EEPROM access */
2273 STAM_PROFILE_ADV_START(&pState->StatEEPROMWrite, a);
2274 pState->eeprom.write(value & EECD_EE_WIRES);
2275 STAM_PROFILE_ADV_STOP(&pState->StatEEPROMWrite, a);
2276 }
2277 if (value & EECD_EE_REQ)
2278 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2279 else
2280 EECD &= ~EECD_EE_GNT;
2281 //e1kRegWriteDefault(pState, offset, index, value );
2282
2283 return VINF_SUCCESS;
2284#else /* !IN_RING3 */
2285 return VINF_IOM_R3_MMIO_WRITE;
2286#endif /* !IN_RING3 */
2287}
2288
2289/**
2290 * Read handler for EEPROM/Flash Control/Data register.
2291 *
2292 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2293 *
2294 * @returns VBox status code.
2295 *
2296 * @param pState The device state structure.
2297 * @param offset Register offset in memory-mapped frame.
2298 * @param index Register index in register array.
2299 * @param mask Used to implement partial reads (8 and 16-bit).
2300 * @thread EMT
2301 */
2302static int e1kRegReadEECD(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2303{
2304#ifdef IN_RING3
2305 uint32_t value;
2306 int rc = e1kRegReadDefault(pState, offset, index, &value);
2307 if (RT_SUCCESS(rc))
2308 {
2309 if ((value & EECD_EE_GNT) || pState->eChip == E1K_CHIP_82543GC)
2310 {
2311 /* Note: 82543GC does not need to request EEPROM access */
2312 /* Access to EEPROM granted -- get 4-wire bits to EEPROM device */
2313 STAM_PROFILE_ADV_START(&pState->StatEEPROMRead, a);
2314 value |= pState->eeprom.read();
2315 STAM_PROFILE_ADV_STOP(&pState->StatEEPROMRead, a);
2316 }
2317 *pu32Value = value;
2318 }
2319
2320 return rc;
2321#else /* !IN_RING3 */
2322 return VINF_IOM_R3_MMIO_READ;
2323#endif /* !IN_RING3 */
2324}
2325
2326/**
2327 * Write handler for EEPROM Read register.
2328 *
2329 * Handles EEPROM word access requests, reads EEPROM and stores the result
2330 * into DATA field.
2331 *
2332 * @param pState The device state structure.
2333 * @param offset Register offset in memory-mapped frame.
2334 * @param index Register index in register array.
2335 * @param value The value to store.
2336 * @param mask Used to implement partial writes (8 and 16-bit).
2337 * @thread EMT
2338 */
2339static int e1kRegWriteEERD(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2340{
2341#ifdef IN_RING3
2342 /* Make use of 'writable' and 'readable' masks. */
2343 e1kRegWriteDefault(pState, offset, index, value);
2344 /* DONE and DATA are set only if read was triggered by START. */
2345 if (value & EERD_START)
2346 {
2347 uint16_t tmp;
2348 STAM_PROFILE_ADV_START(&pState->StatEEPROMRead, a);
2349 if (pState->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2350 SET_BITS(EERD, DATA, tmp);
2351 EERD |= EERD_DONE;
2352 STAM_PROFILE_ADV_STOP(&pState->StatEEPROMRead, a);
2353 }
2354
2355 return VINF_SUCCESS;
2356#else /* !IN_RING3 */
2357 return VINF_IOM_R3_MMIO_WRITE;
2358#endif /* !IN_RING3 */
2359}
2360
2361
2362/**
2363 * Write handler for MDI Control register.
2364 *
2365 * Handles PHY read/write requests; forwards requests to internal PHY device.
2366 *
2367 * @param pState The device state structure.
2368 * @param offset Register offset in memory-mapped frame.
2369 * @param index Register index in register array.
2370 * @param value The value to store.
2371 * @param mask Used to implement partial writes (8 and 16-bit).
2372 * @thread EMT
2373 */
2374static int e1kRegWriteMDIC(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2375{
2376 if (value & MDIC_INT_EN)
2377 {
2378 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
2379 INSTANCE(pState)));
2380 }
2381 else if (value & MDIC_READY)
2382 {
2383 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
2384 INSTANCE(pState)));
2385 }
2386 else if (GET_BITS_V(value, MDIC, PHY) != 1)
2387 {
2388 E1kLog(("%s ERROR! Access to invalid PHY detected, phy=%d.\n",
2389 INSTANCE(pState), GET_BITS_V(value, MDIC, PHY)));
2390 }
2391 else
2392 {
2393 /* Store the value */
2394 e1kRegWriteDefault(pState, offset, index, value);
2395 STAM_COUNTER_INC(&pState->StatPHYAccesses);
2396 /* Forward op to PHY */
2397 if (value & MDIC_OP_READ)
2398 SET_BITS(MDIC, DATA, Phy::readRegister(&pState->phy, GET_BITS_V(value, MDIC, REG)));
2399 else
2400 Phy::writeRegister(&pState->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK);
2401 /* Let software know that we are done */
2402 MDIC |= MDIC_READY;
2403 }
2404
2405 return VINF_SUCCESS;
2406}
2407
2408/**
2409 * Write handler for Interrupt Cause Read register.
2410 *
2411 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
2412 *
2413 * @param pState The device state structure.
2414 * @param offset Register offset in memory-mapped frame.
2415 * @param index Register index in register array.
2416 * @param value The value to store.
2417 * @param mask Used to implement partial writes (8 and 16-bit).
2418 * @thread EMT
2419 */
2420static int e1kRegWriteICR(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2421{
2422 ICR &= ~value;
2423
2424 return VINF_SUCCESS;
2425}
2426
2427/**
2428 * Read handler for Interrupt Cause Read register.
2429 *
2430 * Reading this register acknowledges all interrupts.
2431 *
2432 * @returns VBox status code.
2433 *
2434 * @param pState The device state structure.
2435 * @param offset Register offset in memory-mapped frame.
2436 * @param index Register index in register array.
2437 * @param mask Not used.
2438 * @thread EMT
2439 */
2440static int e1kRegReadICR(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2441{
2442 int rc = e1kCsEnter(pState, VINF_IOM_R3_MMIO_READ);
2443 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2444 return rc;
2445
2446 uint32_t value = 0;
2447 rc = e1kRegReadDefault(pState, offset, index, &value);
2448 if (RT_SUCCESS(rc))
2449 {
2450 if (value)
2451 {
2452 /*
2453 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
2454 * with disabled interrupts.
2455 */
2456 //if (IMS)
2457 if (1)
2458 {
2459 /*
2460 * Interrupts were enabled -- we are supposedly at the very
2461 * beginning of the interrupt handler
2462 */
2463 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
2464 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", INSTANCE(pState), ICR));
2465 /* Clear all pending interrupts */
2466 ICR = 0;
2467 pState->fIntRaised = false;
2468 /* Lower(0) INTA(0) */
2469 PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 0);
2470
2471 pState->u64AckedAt = TMTimerGet(pState->CTX_SUFF(pIntTimer));
2472 if (pState->fIntMaskUsed)
2473 pState->fDelayInts = true;
2474 }
2475 else
2476 {
2477 /*
2478 * Interrupts are disabled -- in windows guests ICR read is done
2479 * just before re-enabling interrupts
2480 */
2481 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", INSTANCE(pState), ICR));
2482 }
2483 }
2484 *pu32Value = value;
2485 }
2486 e1kCsLeave(pState);
2487
2488 return rc;
2489}
2490
2491/**
2492 * Write handler for Interrupt Cause Set register.
2493 *
2494 * Bits corresponding to 1s in 'value' will be set in ICR register.
2495 *
2496 * @param pState The device state structure.
2497 * @param offset Register offset in memory-mapped frame.
2498 * @param index Register index in register array.
2499 * @param value The value to store.
2500 * @param mask Used to implement partial writes (8 and 16-bit).
2501 * @thread EMT
2502 */
2503static int e1kRegWriteICS(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2504{
2505 E1K_INC_ISTAT_CNT(pState->uStatIntICS);
2506 return e1kRaiseInterrupt(pState, VINF_IOM_R3_MMIO_WRITE, value & s_e1kRegMap[ICS_IDX].writable);
2507}
2508
2509/**
2510 * Write handler for Interrupt Mask Set register.
2511 *
2512 * Will trigger pending interrupts.
2513 *
2514 * @param pState The device state structure.
2515 * @param offset Register offset in memory-mapped frame.
2516 * @param index Register index in register array.
2517 * @param value The value to store.
2518 * @param mask Used to implement partial writes (8 and 16-bit).
2519 * @thread EMT
2520 */
2521static int e1kRegWriteIMS(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2522{
2523 IMS |= value;
2524 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
2525 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", INSTANCE(pState)));
2526 /* Mask changes, we need to raise pending interrupts. */
2527 if ((ICR & IMS) && !pState->fLocked)
2528 {
2529 E1kLog2(("%s e1kRegWriteIMS: IRQ pending (%08x), arming late int timer...\n",
2530 INSTANCE(pState), ICR));
2531 /* Raising an interrupt immediately causes win7 to hang upon NIC reconfiguration (#5023) */
2532 TMTimerSet(pState->CTX_SUFF(pIntTimer), TMTimerFromNano(pState->CTX_SUFF(pIntTimer), ITR * 256) +
2533 TMTimerGet(pState->CTX_SUFF(pIntTimer)));
2534 }
2535
2536 return VINF_SUCCESS;
2537}
2538
2539/**
2540 * Write handler for Interrupt Mask Clear register.
2541 *
2542 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
2543 *
2544 * @param pState The device state structure.
2545 * @param offset Register offset in memory-mapped frame.
2546 * @param index Register index in register array.
2547 * @param value The value to store.
2548 * @param mask Used to implement partial writes (8 and 16-bit).
2549 * @thread EMT
2550 */
2551static int e1kRegWriteIMC(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2552{
2553 int rc = e1kCsEnter(pState, VINF_IOM_R3_MMIO_WRITE);
2554 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2555 return rc;
2556 if (pState->fIntRaised)
2557 {
2558 /*
2559 * Technically we should reset fIntRaised in ICR read handler, but it will cause
2560 * Windows to freeze since it may receive an interrupt while still in the very beginning
2561 * of interrupt handler.
2562 */
2563 E1K_INC_ISTAT_CNT(pState->uStatIntLower);
2564 STAM_COUNTER_INC(&pState->StatIntsPrevented);
2565 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
2566 /* Lower(0) INTA(0) */
2567 PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 0);
2568 pState->fIntRaised = false;
2569 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", INSTANCE(pState), ICR));
2570 }
2571 IMS &= ~value;
2572 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", INSTANCE(pState)));
2573 e1kCsLeave(pState);
2574
2575 return VINF_SUCCESS;
2576}
2577
2578/**
2579 * Write handler for Receive Control register.
2580 *
2581 * @param pState The device state structure.
2582 * @param offset Register offset in memory-mapped frame.
2583 * @param index Register index in register array.
2584 * @param value The value to store.
2585 * @param mask Used to implement partial writes (8 and 16-bit).
2586 * @thread EMT
2587 */
2588static int e1kRegWriteRCTL(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2589{
2590 /* Update promiscuous mode */
2591 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
2592 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
2593 {
2594 /* Promiscuity has changed, pass the knowledge on. */
2595#ifndef IN_RING3
2596 return VINF_IOM_R3_IOPORT_WRITE;
2597#else
2598 if (pState->pDrvR3)
2599 pState->pDrvR3->pfnSetPromiscuousMode(pState->pDrvR3, fBecomePromiscous);
2600#endif
2601 }
2602
2603 /* Adjust receive buffer size */
2604 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
2605 if (value & RCTL_BSEX)
2606 cbRxBuf *= 16;
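    /* BSIZE=00b/01b/10b/11b selects 2048/1024/512/256-byte buffers; with BSEX set
     * the size is multiplied by 16, e.g. BSIZE=01b with BSEX gives 16384 bytes. */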
2607 if (cbRxBuf != pState->u16RxBSize)
2608 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
2609 INSTANCE(pState), cbRxBuf, pState->u16RxBSize));
2610 pState->u16RxBSize = cbRxBuf;
2611
2612 /* Update the register */
2613 e1kRegWriteDefault(pState, offset, index, value);
2614
2615 return VINF_SUCCESS;
2616}
2617
2618/**
2619 * Write handler for Packet Buffer Allocation register.
2620 *
2621 * TXA = 64 - RXA.
2622 *
2623 * @param pState The device state structure.
2624 * @param offset Register offset in memory-mapped frame.
2625 * @param index Register index in register array.
2626 * @param value The value to store.
2627 * @param mask Used to implement partial writes (8 and 16-bit).
2628 * @thread EMT
2629 */
2630static int e1kRegWritePBA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2631{
2632 e1kRegWriteDefault(pState, offset, index, value);
2633 PBA_st->txa = 64 - PBA_st->rxa;
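    /* The packet buffer is split between receive and transmit, e.g. writing
     * RXA=40 leaves TXA=24 (units of KB). */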
2634
2635 return VINF_SUCCESS;
2636}
2637
2638/**
2639 * Write handler for Receive Descriptor Tail register.
2640 *
2641 * @remarks Write into RDT forces switch to HC and signal to
2642 * e1kNetworkDown_WaitReceiveAvail().
2643 *
2644 * @returns VBox status code.
2645 *
2646 * @param pState The device state structure.
2647 * @param offset Register offset in memory-mapped frame.
2648 * @param index Register index in register array.
2649 * @param value The value to store.
2650 * @param mask Used to implement partial writes (8 and 16-bit).
2651 * @thread EMT
2652 */
2653static int e1kRegWriteRDT(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2654{
2655#ifndef IN_RING3
2656 /* XXX */
2657// return VINF_IOM_R3_MMIO_WRITE;
2658#endif
2659 int rc = e1kCsRxEnter(pState, VINF_IOM_R3_MMIO_WRITE);
2660 if (RT_LIKELY(rc == VINF_SUCCESS))
2661 {
2662 E1kLog(("%s e1kRegWriteRDT\n", INSTANCE(pState)));
2663 rc = e1kRegWriteDefault(pState, offset, index, value);
2664 e1kCsRxLeave(pState);
2665 if (RT_SUCCESS(rc))
2666 {
2667/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
2668 * without requiring any context switches. We should also check the
2669 * wait condition before bothering to queue the item as we're currently
2670 * queuing thousands of items per second here in a normal transmit
2671 * scenario. Expect performance changes when fixing this! */
2672#ifdef IN_RING3
2673 /* Signal that we have more receive descriptors available. */
2674 e1kWakeupReceive(pState->CTX_SUFF(pDevIns));
2675#else
2676 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pState->CTX_SUFF(pCanRxQueue));
2677 if (pItem)
2678 PDMQueueInsert(pState->CTX_SUFF(pCanRxQueue), pItem);
2679#endif
2680 }
2681 }
2682 return rc;
2683}
2684
2685/**
2686 * Write handler for Receive Delay Timer register.
2687 *
2688 * @param pState The device state structure.
2689 * @param offset Register offset in memory-mapped frame.
2690 * @param index Register index in register array.
2691 * @param value The value to store.
2692 * @param mask Used to implement partial writes (8 and 16-bit).
2693 * @thread EMT
2694 */
2695static int e1kRegWriteRDTR(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2696{
2697 e1kRegWriteDefault(pState, offset, index, value);
2698 if (value & RDTR_FPD)
2699 {
2700 /* Flush requested, cancel both timers and raise interrupt */
2701#ifdef E1K_USE_RX_TIMERS
2702 e1kCancelTimer(pState, pState->CTX_SUFF(pRIDTimer));
2703 e1kCancelTimer(pState, pState->CTX_SUFF(pRADTimer));
2704#endif
2705 E1K_INC_ISTAT_CNT(pState->uStatIntRDTR);
2706 return e1kRaiseInterrupt(pState, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
2707 }
2708
2709 return VINF_SUCCESS;
2710}
2711
2712DECLINLINE(uint32_t) e1kGetTxLen(E1KSTATE* pState)
2713{
2714 /*
2715 * Make sure TDT won't change during computation. EMT may modify TDT at
2716 * any moment.
2717 */
2718 uint32_t tdt = TDT;
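    /* E.g. a 64-descriptor ring (TDLEN=1024) with TDH=60 and TDT=4 yields
     * 64 + 4 - 60 = 8 descriptors still to be processed. */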
2719 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
2720}
2721
2722#ifdef IN_RING3
2723#ifdef E1K_USE_TX_TIMERS
2724
2725/**
2726 * Transmit Interrupt Delay Timer handler.
2727 *
2728 * @remarks We only get here when the timer expires.
2729 *
2730 * @param pDevIns Pointer to device instance structure.
2731 * @param pTimer Pointer to the timer.
2732 * @param pvUser NULL.
2733 * @thread EMT
2734 */
2735static DECLCALLBACK(void) e1kTxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2736{
2737 E1KSTATE *pState = (E1KSTATE *)pvUser;
2738
2739 E1K_INC_ISTAT_CNT(pState->uStatTID);
2740 /* Cancel absolute delay timer as we have already got attention */
2741#ifndef E1K_NO_TAD
2742 e1kCancelTimer(pState, pState->CTX_SUFF(pTADTimer));
2743#endif /* E1K_NO_TAD */
2744 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXDW);
2745}
2746
2747/**
2748 * Transmit Absolute Delay Timer handler.
2749 *
2750 * @remarks We only get here when the timer expires.
2751 *
2752 * @param pDevIns Pointer to device instance structure.
2753 * @param pTimer Pointer to the timer.
2754 * @param pvUser NULL.
2755 * @thread EMT
2756 */
2757static DECLCALLBACK(void) e1kTxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2758{
2759 E1KSTATE *pState = (E1KSTATE *)pvUser;
2760
2761 E1K_INC_ISTAT_CNT(pState->uStatTAD);
2762 /* Cancel interrupt delay timer as we have already got attention */
2763 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
2764 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXDW);
2765}
2766
2767#endif /* E1K_USE_TX_TIMERS */
2768#ifdef E1K_USE_RX_TIMERS
2769
2770/**
2771 * Receive Interrupt Delay Timer handler.
2772 *
2773 * @remarks We only get here when the timer expires.
2774 *
2775 * @param pDevIns Pointer to device instance structure.
2776 * @param pTimer Pointer to the timer.
2777 * @param pvUser NULL.
2778 * @thread EMT
2779 */
2780static DECLCALLBACK(void) e1kRxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2781{
2782 E1KSTATE *pState = (E1KSTATE *)pvUser;
2783
2784 E1K_INC_ISTAT_CNT(pState->uStatRID);
2785 /* Cancel absolute delay timer as we have already got attention */
2786 e1kCancelTimer(pState, pState->CTX_SUFF(pRADTimer));
2787 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXT0);
2788}
2789
2790/**
2791 * Receive Absolute Delay Timer handler.
2792 *
2793 * @remarks We only get here when the timer expires.
2794 *
2795 * @param pDevIns Pointer to device instance structure.
2796 * @param pTimer Pointer to the timer.
2797 * @param pvUser NULL.
2798 * @thread EMT
2799 */
2800static DECLCALLBACK(void) e1kRxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2801{
2802 E1KSTATE *pState = (E1KSTATE *)pvUser;
2803
2804 E1K_INC_ISTAT_CNT(pState->uStatRAD);
2805 /* Cancel interrupt delay timer as we have already got attention */
2806 e1kCancelTimer(pState, pState->CTX_SUFF(pRIDTimer));
2807 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXT0);
2808}
2809
2810#endif /* E1K_USE_RX_TIMERS */
2811
2812/**
2813 * Late Interrupt Timer handler.
2814 *
2815 * @param pDevIns Pointer to device instance structure.
2816 * @param pTimer Pointer to the timer.
2817 * @param pvUser NULL.
2818 * @thread EMT
2819 */
2820static DECLCALLBACK(void) e1kLateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2821{
2822 E1KSTATE *pState = (E1KSTATE *)pvUser;
2823
2824 STAM_PROFILE_ADV_START(&pState->StatLateIntTimer, a);
2825 STAM_COUNTER_INC(&pState->StatLateInts);
2826 E1K_INC_ISTAT_CNT(pState->uStatIntLate);
2827#if 0
2828 if (pState->iStatIntLost > -100)
2829 pState->iStatIntLost--;
2830#endif
2831 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, 0);
2832 STAM_PROFILE_ADV_STOP(&pState->StatLateIntTimer, a);
2833}
2834
2835/**
2836 * Link Up Timer handler.
2837 *
2838 * @param pDevIns Pointer to device instance structure.
2839 * @param pTimer Pointer to the timer.
2840 * @param pvUser NULL.
2841 * @thread EMT
2842 */
2843static DECLCALLBACK(void) e1kLinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
2844{
2845 E1KSTATE *pState = (E1KSTATE *)pvUser;
2846
2847 /*
2848 * This can happen if the link status was set to down while the link-up timer
2849 * was already armed (shortly after e1kLoadDone()), or if the cable was
2850 * disconnected and reconnected very quickly.
2851 */
2852 if (!pState->fCableConnected)
2853 return;
2854
2855 STATUS |= STATUS_LU;
2856 Phy::setLinkStatus(&pState->phy, true);
2857 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
2858}
2859
2860#endif /* IN_RING3 */
2861
2862/**
2863 * Sets up the GSO context according to the TSE new context descriptor.
2864 *
2865 * @param pGso The GSO context to setup.
2866 * @param pCtx The context descriptor.
2867 */
2868DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
2869{
2870 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
2871
2872 /*
2873 * See if the context descriptor describes something that could be TCP or
2874 * UDP over IPv[46].
2875 */
2876 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
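    /* For instance, a TSE context for TCP over IPv4 without options typically has
     * IPCSS=14 (after the Ethernet header), TUCSS=34 (14 + 20-byte IP header) and
     * HDRLEN=54 (14 + 20 + 20-byte TCP header). */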
2877 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
2878 {
2879 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
2880 return;
2881 }
2882 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
2883 {
2884 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
2885 return;
2886 }
2887 if (RT_UNLIKELY( pCtx->dw2.fTCP
2888 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
2889 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
2890 {
2891 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
2892 return;
2893 }
2894
2895 /* The end of the TCP/UDP checksum should stop at the end of the packet or at least after the headers. */
2896 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
2897 {
2898 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
2899 return;
2900 }
2901
2902 /* IPv4 checksum offset. */
2903 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
2904 {
2905 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
2906 return;
2907 }
2908
2909 /* TCP/UDP checksum offsets. */
2910 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
2911 != ( pCtx->dw2.fTCP
2912 ? RT_UOFFSETOF(RTNETTCP, th_sum)
2913 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
2914 {
2915 E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->tu.u8CSO, pCtx->tu.u8CSS, pCtx->dw2.fTCP));
2916 return;
2917 }
2918
2919 /*
2920 * Because of internal networking using a 16-bit size field for GSO context
2921 * plus frame, we have to make sure we don't exceed this.
2922 */
2923 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
2924 {
2925 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
2926 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
2927 return;
2928 }
2929
2930 /*
2931 * We're good for now - we'll do more checks when seeing the data.
2932 * So, figure the type of offloading and setup the context.
2933 */
2934 if (pCtx->dw2.fIP)
2935 {
2936 if (pCtx->dw2.fTCP)
2937 {
2938 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
2939 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
2940 }
2941 else
2942 {
2943 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
2944 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
2945 }
2946 /** @todo Detect IPv4-IPv6 tunneling (needs a test setup, since Linux does not
2947 * seem to do this yet)... */
2948 }
2949 else
2950 {
2951 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /* @todo IPv6 UFO */
2952 if (pCtx->dw2.fTCP)
2953 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
2954 else
2955 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
2956 }
2957 pGso->offHdr1 = pCtx->ip.u8CSS;
2958 pGso->offHdr2 = pCtx->tu.u8CSS;
2959 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
2960 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
2961 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
2962 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
2963 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
2964}
2965
2966/**
2967 * Checks if we can use GSO processing for the current TSE frame.
2968 *
2969 * @param pGso The GSO context.
2970 * @param pData The first data descriptor of the frame.
2971 * @param pCtx The TSO context descriptor.
2972 */
2973DECLINLINE(bool) e1kCanDoGso(PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
2974{
2975 if (!pData->cmd.fTSE)
2976 {
2977 E1kLog2(("e1kCanDoGso: !TSE\n"));
2978 return false;
2979 }
2980 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
2981 {
2982 E1kLog(("e1kCanDoGso: VLE\n"));
2983 return false;
2984 }
2985
2986 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
2987 {
2988 case PDMNETWORKGSOTYPE_IPV4_TCP:
2989 case PDMNETWORKGSOTYPE_IPV4_UDP:
2990 if (!pData->dw3.fIXSM)
2991 {
2992 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
2993 return false;
2994 }
2995 if (!pData->dw3.fTXSM)
2996 {
2997 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
2998 return false;
2999 }
3000            /** @todo what other checks should we perform here? Ethernet frame type? */
3001 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3002 return true;
3003
3004 case PDMNETWORKGSOTYPE_IPV6_TCP:
3005 case PDMNETWORKGSOTYPE_IPV6_UDP:
3006 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3007 {
3008 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3009 return false;
3010 }
3011 if (!pData->dw3.fTXSM)
3012 {
3013                E1kLog(("e1kCanDoGso: !TXSM (IPv6)\n"));
3014 return false;
3015 }
3016            /** @todo what other checks should we perform here? Ethernet frame type? */
3017            E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3018 return true;
3019
3020 default:
3021 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3022 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3023 return false;
3024 }
3025}
3026
3027/**
3028 * Frees the current xmit buffer.
3029 *
3030 * @param pState The device state structure.
3031 */
3032static void e1kXmitFreeBuf(E1KSTATE *pState)
3033{
3034 PPDMSCATTERGATHER pSg = pState->CTX_SUFF(pTxSg);
3035 if (pSg)
3036 {
3037 pState->CTX_SUFF(pTxSg) = NULL;
3038
3039 if (pSg->pvAllocator != pState)
3040 {
3041 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3042 if (pDrv)
3043 pDrv->pfnFreeBuf(pDrv, pSg);
3044 }
3045 else
3046 {
3047 /* loopback */
3048 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3049 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3050 pSg->fFlags = 0;
3051 pSg->pvAllocator = NULL;
3052 }
3053 }
3054}
3055
3056#ifndef E1K_WITH_TXD_CACHE
3057/**
3058 * Allocates an xmit buffer.
3059 *
3060 * @returns See PDMINETWORKUP::pfnAllocBuf.
3061 * @param pState The device state structure.
3062 * @param cbMin The minimum frame size.
3063 * @param fExactSize Whether cbMin is exact or if we have to max it
3064 * out to the max MTU size.
3065 * @param fGso Whether this is a GSO frame or not.
3066 */
3067DECLINLINE(int) e1kXmitAllocBuf(E1KSTATE *pState, size_t cbMin, bool fExactSize, bool fGso)
3068{
3069 /* Adjust cbMin if necessary. */
3070 if (!fExactSize)
3071 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3072
3073 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3074 if (RT_UNLIKELY(pState->CTX_SUFF(pTxSg)))
3075 e1kXmitFreeBuf(pState);
3076 Assert(pState->CTX_SUFF(pTxSg) == NULL);
3077
3078 /*
3079 * Allocate the buffer.
3080 */
3081 PPDMSCATTERGATHER pSg;
3082 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3083 {
3084 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3085 if (RT_UNLIKELY(!pDrv))
3086 return VERR_NET_DOWN;
3087 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pState->GsoCtx : NULL, &pSg);
3088 if (RT_FAILURE(rc))
3089 {
3090 /* Suspend TX as we are out of buffers atm */
3091 STATUS |= STATUS_TXOFF;
3092 return rc;
3093 }
3094 }
3095 else
3096 {
3097 /* Create a loopback using the fallback buffer and preallocated SG. */
3098 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3099 pSg = &pState->uTxFallback.Sg;
3100 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3101 pSg->cbUsed = 0;
3102 pSg->cbAvailable = 0;
3103 pSg->pvAllocator = pState;
3104 pSg->pvUser = NULL; /* No GSO here. */
3105 pSg->cSegs = 1;
3106 pSg->aSegs[0].pvSeg = pState->aTxPacketFallback;
3107 pSg->aSegs[0].cbSeg = sizeof(pState->aTxPacketFallback);
3108 }
3109
3110 pState->CTX_SUFF(pTxSg) = pSg;
3111 return VINF_SUCCESS;
3112}
3113#else /* E1K_WITH_TXD_CACHE */
3114/**
3115 * Allocates an xmit buffer.
3116 *
3117 * @returns See PDMINETWORKUP::pfnAllocBuf.
3118 * @param pState The device state structure.
3122 * @param fGso Whether this is a GSO frame or not.
3123 */
3124DECLINLINE(int) e1kXmitAllocBuf(E1KSTATE *pState, bool fGso)
3125{
3126 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3127 if (RT_UNLIKELY(pState->CTX_SUFF(pTxSg)))
3128 e1kXmitFreeBuf(pState);
3129 Assert(pState->CTX_SUFF(pTxSg) == NULL);
3130
3131 /*
3132 * Allocate the buffer.
3133 */
3134 PPDMSCATTERGATHER pSg;
3135 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3136 {
3137 Assert(pState->cbTxAlloc != 0);
3138 if (pState->cbTxAlloc == 0)
3139 return VERR_NET_IO_ERROR;
3140
3141 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3142 if (RT_UNLIKELY(!pDrv))
3143 return VERR_NET_DOWN;
3144 int rc = pDrv->pfnAllocBuf(pDrv, pState->cbTxAlloc, fGso ? &pState->GsoCtx : NULL, &pSg);
3145 if (RT_FAILURE(rc))
3146 {
3147 /* Suspend TX as we are out of buffers atm */
3148 STATUS |= STATUS_TXOFF;
3149 return rc;
3150 }
3151 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3152 INSTANCE(pState), pState->cbTxAlloc,
3153 pState->fVTag ? "VLAN " : "",
3154 pState->fGSO ? "GSO " : ""));
3155 pState->cbTxAlloc = 0;
3156 }
3157 else
3158 {
3159 /* Create a loopback using the fallback buffer and preallocated SG. */
3160 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3161 pSg = &pState->uTxFallback.Sg;
3162 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3163 pSg->cbUsed = 0;
3164 pSg->cbAvailable = 0;
3165 pSg->pvAllocator = pState;
3166 pSg->pvUser = NULL; /* No GSO here. */
3167 pSg->cSegs = 1;
3168 pSg->aSegs[0].pvSeg = pState->aTxPacketFallback;
3169 pSg->aSegs[0].cbSeg = sizeof(pState->aTxPacketFallback);
3170 }
3171
3172 pState->CTX_SUFF(pTxSg) = pSg;
3173 return VINF_SUCCESS;
3174}
3175#endif /* E1K_WITH_TXD_CACHE */
3176
3177/**
3178 * Checks if it's a GSO buffer or not.
3179 *
3180 * @returns true / false.
3181 * @param pTxSg The scatter / gather buffer.
3182 */
3183DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3184{
3185#if 0
3186 if (!pTxSg)
3187 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3188    if (pTxSg && !pTxSg->pvUser)
3189 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3190#endif
3191 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3192}
3193
3194#ifndef E1K_WITH_TXD_CACHE
3195/**
3196 * Load transmit descriptor from guest memory.
3197 *
3198 * @param pState The device state structure.
3199 * @param pDesc Pointer to descriptor union.
3200 * @param addr Physical address in guest context.
3201 * @thread E1000_TX
3202 */
3203DECLINLINE(void) e1kLoadDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr)
3204{
3205 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3206}
3207#else /* E1K_WITH_TXD_CACHE */
3208/**
3209 * Load transmit descriptors from guest memory.
3210 *
3211 * We need two physical reads in case the descriptors to fetch wrap around the
3212 * end of the TX descriptor ring.
3213 *
3214 * @returns the actual number of descriptors fetched.
3215 * @param pState The device state structure.
3218 * @thread E1000_TX
3219 */
3220DECLINLINE(unsigned) e1kTxDLoadMore(E1KSTATE* pState)
3221{
3222 unsigned nDescsToFetch = RT_MIN(e1kGetTxLen(pState), E1K_TXD_CACHE_SIZE - pState->nTxDFetched);
3223 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, TDLEN / sizeof(E1KTXDESC) - TDH);
3224 if (nDescsToFetch == 0)
3225 return 0;
3226 E1KTXDESC* pFirstEmptyDesc = &pState->aTxDescriptors[pState->nTxDFetched];
3227 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
3228 ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(E1KTXDESC),
3229 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3230 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3231 INSTANCE(pState), nDescsInSingleRead, TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
3232 if (nDescsToFetch > nDescsInSingleRead)
3233 {
3234 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
3235 ((uint64_t)TDBAH << 32) + TDBAL,
3236 pFirstEmptyDesc + nDescsInSingleRead,
3237 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3238 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3239 INSTANCE(pState), nDescsToFetch - nDescsInSingleRead,
3240 TDBAH, TDBAL, TDLEN, TDH, TDT));
3241 }
3242 pState->nTxDFetched += nDescsToFetch;
3243 return nDescsToFetch;
3244}
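/*
 * For illustration, a worked example of the split read above with hypothetical
 * numbers. Assume a ring of 8 descriptors (TDLEN = 8 * sizeof(E1KTXDESC)),
 * TDH = 6, TDT = 2 (so the device owns 4 descriptors) and at least 4 free
 * slots in the descriptor cache:
 *
 *   nDescsToFetch      = min(descriptors owned, free cache slots) = 4
 *   nDescsInSingleRead = min(4, 8 - TDH)                          = 2
 *
 * Descriptors 6 and 7 are fetched by the first read; the remaining two are
 * fetched from the ring base at TDBAH:TDBAL by the second read.
 */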
3245
3246/**
3247 * Load transmit descriptors from guest memory only if there are no loaded
3248 * descriptors.
3249 *
3250 * @returns true if there are descriptors in cache.
3251 * @param pState The device state structure.
3254 * @thread E1000_TX
3255 */
3256DECLINLINE(bool) e1kTxDLazyLoad(E1KSTATE* pState)
3257{
3258 if (pState->nTxDFetched == 0)
3259 return e1kTxDLoadMore(pState) != 0;
3260 return true;
3261}
3262#endif /* E1K_WITH_TXD_CACHE */
3263
3264/**
3265 * Write back transmit descriptor to guest memory.
3266 *
3267 * @param pState The device state structure.
3268 * @param pDesc Pointer to descriptor union.
3269 * @param addr Physical address in guest context.
3270 * @thread E1000_TX
3271 */
3272DECLINLINE(void) e1kWriteBackDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr)
3273{
3274    /* Only the last half of the descriptor actually needs to be written back, but we write back the whole descriptor for simplicity. */
3275 e1kPrintTDesc(pState, pDesc, "^^^");
3276 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3277}
3278
3279/**
3280 * Transmit complete frame.
3281 *
3282 * @remarks We skip the FCS since we're not responsible for sending anything to
3283 * a real ethernet wire.
3284 *
3285 * @param pState The device state structure.
3286 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3287 * @thread E1000_TX
3288 */
3289static void e1kTransmitFrame(E1KSTATE* pState, bool fOnWorkerThread)
3290{
3291 PPDMSCATTERGATHER pSg = pState->CTX_SUFF(pTxSg);
3292 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
3293 Assert(!pSg || pSg->cSegs == 1);
3294
3295 if (cbFrame > 70) /* unqualified guess */
3296 pState->led.Asserted.s.fWriting = pState->led.Actual.s.fWriting = 1;
3297
3298 /* Add VLAN tag */
3299 if (cbFrame > 12 && pState->fVTag)
3300 {
3301 E1kLog3(("%s Inserting VLAN tag %08x\n",
3302 INSTANCE(pState), RT_BE2H_U16(VET) | (RT_BE2H_U16(pState->u16VTagTCI) << 16)));
3303 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
3304 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pState->u16VTagTCI) << 16);
3305 pSg->cbUsed += 4;
3306 cbFrame += 4;
3307 Assert(pSg->cbUsed == cbFrame);
3308 Assert(pSg->cbUsed <= pSg->cbAvailable);
3309 }
3310/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
3311 "%.*Rhxd\n"
3312 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
3313 INSTANCE(pState), cbFrame, pSg->aSegs[0].pvSeg, INSTANCE(pState)));*/
3314
3315 /* Update the stats */
3316 E1K_INC_CNT32(TPT);
3317 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
3318 E1K_INC_CNT32(GPTC);
3319 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
3320 E1K_INC_CNT32(BPTC);
3321 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
3322 E1K_INC_CNT32(MPTC);
3323 /* Update octet transmit counter */
3324 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
3325 if (pState->CTX_SUFF(pDrv))
3326 STAM_REL_COUNTER_ADD(&pState->StatTransmitBytes, cbFrame);
3327 if (cbFrame == 64)
3328 E1K_INC_CNT32(PTC64);
3329 else if (cbFrame < 128)
3330 E1K_INC_CNT32(PTC127);
3331 else if (cbFrame < 256)
3332 E1K_INC_CNT32(PTC255);
3333 else if (cbFrame < 512)
3334 E1K_INC_CNT32(PTC511);
3335 else if (cbFrame < 1024)
3336 E1K_INC_CNT32(PTC1023);
3337 else
3338 E1K_INC_CNT32(PTC1522);
3339
3340 E1K_INC_ISTAT_CNT(pState->uStatTxFrm);
3341
3342 /*
3343 * Dump and send the packet.
3344 */
3345 int rc = VERR_NET_DOWN;
3346 if (pSg && pSg->pvAllocator != pState)
3347 {
3348 e1kPacketDump(pState, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
3349
3350 pState->CTX_SUFF(pTxSg) = NULL;
3351 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3352 if (pDrv)
3353 {
3354 /* Release critical section to avoid deadlock in CanReceive */
3355 //e1kCsLeave(pState);
3356 STAM_PROFILE_START(&pState->CTX_SUFF_Z(StatTransmitSend), a);
3357 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
3358 STAM_PROFILE_STOP(&pState->CTX_SUFF_Z(StatTransmitSend), a);
3359 //e1kCsEnter(pState, RT_SRC_POS);
3360 }
3361 }
3362 else if (pSg)
3363 {
3364 Assert(pSg->aSegs[0].pvSeg == pState->aTxPacketFallback);
3365 e1kPacketDump(pState, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
3366
3367 /** @todo do we actually need to check that we're in loopback mode here? */
3368 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
3369 {
3370 E1KRXDST status;
3371 RT_ZERO(status);
3372 status.fPIF = true;
3373 e1kHandleRxPacket(pState, pSg->aSegs[0].pvSeg, cbFrame, status);
3374 rc = VINF_SUCCESS;
3375 }
3376 e1kXmitFreeBuf(pState);
3377 }
3378 else
3379 rc = VERR_NET_DOWN;
3380 if (RT_FAILURE(rc))
3381 {
3382 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
3383 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
3384 }
3385
3386 pState->led.Actual.s.fWriting = 0;
3387}
3388
3389/**
3390 * Compute and write internet checksum (e1kCSum16) at the specified offset.
3391 *
3392 * @param pState The device state structure.
3393 * @param pPkt Pointer to the packet.
3394 * @param u16PktLen Total length of the packet.
3395 * @param cso Offset in packet to write checksum at.
3396 * @param css Offset in packet to start computing
3397 * checksum from.
3398 * @param cse Offset in packet to stop computing
3399 * checksum at.
3400 * @thread E1000_TX
3401 */
3402static void e1kInsertChecksum(E1KSTATE* pState, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
3403{
3404 if (css >= u16PktLen)
3405 {
3406 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
3407                 INSTANCE(pState), css, u16PktLen));
3408 return;
3409 }
3410
3411 if (cso >= u16PktLen - 1)
3412 {
3413 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
3414 INSTANCE(pState), cso, u16PktLen));
3415 return;
3416 }
3417
3418 if (cse == 0)
3419 cse = u16PktLen - 1;
3420 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
3421 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", INSTANCE(pState),
3422 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
3423 *(uint16_t*)(pPkt + cso) = u16ChkSum;
3424}
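/*
 * e1kCSum16 is defined elsewhere in this file and is not shown in this
 * excerpt. For reference, a standard RFC 1071 style 16-bit one's complement
 * sum over a byte range looks roughly like the sketch below; this is an
 * assumption for illustration, not the actual implementation.
 */
#if 0 /* illustrative sketch only */
static uint16_t e1kCSum16Sketch(const void *pvBuf, size_t cb)
{
    uint32_t        csum = 0;
    const uint16_t *pu16 = (const uint16_t *)pvBuf;
    while (cb > 1)
    {
        csum += *pu16++;                       /* sum 16-bit words */
        cb   -= 2;
    }
    if (cb)
        csum += *(const uint8_t *)pu16;        /* odd trailing byte */
    while (csum >> 16)
        csum = (csum >> 16) + (csum & 0xFFFF); /* fold carries into the low word */
    return (uint16_t)~csum;
}
#endif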
3425
3426/**
3427 * Add a part of descriptor's buffer to transmit frame.
3428 *
3429 * @remarks data.u64BufAddr is used unconditionally for both data
3430 * and legacy descriptors since it is identical to
3431 * legacy.u64BufAddr.
3432 *
3433 * @param pState The device state structure.
3434 * @param pDesc Pointer to the descriptor to transmit.
3435 * @param u16Len Length of buffer to the end of segment.
3436 * @param fSend Force packet sending.
3437 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3438 * @thread E1000_TX
3439 */
3440#ifndef E1K_WITH_TXD_CACHE
3441static void e1kFallbackAddSegment(E1KSTATE* pState, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
3442{
3443 /* TCP header being transmitted */
3444 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
3445 (pState->aTxPacketFallback + pState->contextTSE.tu.u8CSS);
3446 /* IP header being transmitted */
3447 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
3448 (pState->aTxPacketFallback + pState->contextTSE.ip.u8CSS);
3449
3450 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
3451 INSTANCE(pState), u16Len, pState->u32PayRemain, pState->u16HdrRemain, fSend));
3452 Assert(pState->u32PayRemain + pState->u16HdrRemain > 0);
3453
3454 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), PhysAddr,
3455 pState->aTxPacketFallback + pState->u16TxPktLen, u16Len);
3456 E1kLog3(("%s Dump of the segment:\n"
3457 "%.*Rhxd\n"
3458 "%s --- End of dump ---\n",
3459 INSTANCE(pState), u16Len, pState->aTxPacketFallback + pState->u16TxPktLen, INSTANCE(pState)));
3460 pState->u16TxPktLen += u16Len;
3461 E1kLog3(("%s e1kFallbackAddSegment: pState->u16TxPktLen=%x\n",
3462 INSTANCE(pState), pState->u16TxPktLen));
3463 if (pState->u16HdrRemain > 0)
3464 {
3465 /* The header was not complete, check if it is now */
3466 if (u16Len >= pState->u16HdrRemain)
3467 {
3468 /* The rest is payload */
3469 u16Len -= pState->u16HdrRemain;
3470 pState->u16HdrRemain = 0;
3471 /* Save partial checksum and flags */
3472 pState->u32SavedCsum = pTcpHdr->chksum;
3473 pState->u16SavedFlags = pTcpHdr->hdrlen_flags;
3474 /* Clear FIN and PSH flags now and set them only in the last segment */
3475 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
3476 }
3477 else
3478 {
3479 /* Still not */
3480 pState->u16HdrRemain -= u16Len;
3481 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
3482 INSTANCE(pState), pState->u16HdrRemain));
3483 return;
3484 }
3485 }
3486
3487 pState->u32PayRemain -= u16Len;
3488
3489 if (fSend)
3490 {
3491 /* Leave ethernet header intact */
3492 /* IP Total Length = payload + headers - ethernet header */
3493 pIpHdr->total_len = htons(pState->u16TxPktLen - pState->contextTSE.ip.u8CSS);
3494 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
3495 INSTANCE(pState), ntohs(pIpHdr->total_len)));
3496 /* Update IP Checksum */
3497 pIpHdr->chksum = 0;
3498 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
3499 pState->contextTSE.ip.u8CSO,
3500 pState->contextTSE.ip.u8CSS,
3501 pState->contextTSE.ip.u16CSE);
3502
3503 /* Update TCP flags */
3504 /* Restore original FIN and PSH flags for the last segment */
3505 if (pState->u32PayRemain == 0)
3506 {
3507 pTcpHdr->hdrlen_flags = pState->u16SavedFlags;
3508 E1K_INC_CNT32(TSCTC);
3509 }
3510 /* Add TCP length to partial pseudo header sum */
3511 uint32_t csum = pState->u32SavedCsum
3512 + htons(pState->u16TxPktLen - pState->contextTSE.tu.u8CSS);
3513 while (csum >> 16)
3514 csum = (csum >> 16) + (csum & 0xFFFF);
3515 pTcpHdr->chksum = csum;
3516 /* Compute final checksum */
3517 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
3518 pState->contextTSE.tu.u8CSO,
3519 pState->contextTSE.tu.u8CSS,
3520 pState->contextTSE.tu.u16CSE);
3521
3522 /*
3523         * Transmit it. If we've used the SG already, allocate a new one before
3524         * we copy the data.
3525 */
3526 if (!pState->CTX_SUFF(pTxSg))
3527 e1kXmitAllocBuf(pState, pState->u16TxPktLen + (pState->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
3528 if (pState->CTX_SUFF(pTxSg))
3529 {
3530 Assert(pState->u16TxPktLen <= pState->CTX_SUFF(pTxSg)->cbAvailable);
3531 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
3532 if (pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pState->aTxPacketFallback)
3533 memcpy(pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->aTxPacketFallback, pState->u16TxPktLen);
3534 pState->CTX_SUFF(pTxSg)->cbUsed = pState->u16TxPktLen;
3535 pState->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pState->u16TxPktLen;
3536 }
3537 e1kTransmitFrame(pState, fOnWorkerThread);
3538
3539 /* Update Sequence Number */
3540 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pState->u16TxPktLen
3541 - pState->contextTSE.dw3.u8HDRLEN);
3542 /* Increment IP identification */
3543 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
3544 }
3545}
3546#else /* E1K_WITH_TXD_CACHE */
3547static int e1kFallbackAddSegment(E1KSTATE* pState, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
3548{
3549 int rc = VINF_SUCCESS;
3550 /* TCP header being transmitted */
3551 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
3552 (pState->aTxPacketFallback + pState->contextTSE.tu.u8CSS);
3553 /* IP header being transmitted */
3554 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
3555 (pState->aTxPacketFallback + pState->contextTSE.ip.u8CSS);
3556
3557 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
3558 INSTANCE(pState), u16Len, pState->u32PayRemain, pState->u16HdrRemain, fSend));
3559 Assert(pState->u32PayRemain + pState->u16HdrRemain > 0);
3560
3561 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), PhysAddr,
3562 pState->aTxPacketFallback + pState->u16TxPktLen, u16Len);
3563 E1kLog3(("%s Dump of the segment:\n"
3564 "%.*Rhxd\n"
3565 "%s --- End of dump ---\n",
3566 INSTANCE(pState), u16Len, pState->aTxPacketFallback + pState->u16TxPktLen, INSTANCE(pState)));
3567 pState->u16TxPktLen += u16Len;
3568 E1kLog3(("%s e1kFallbackAddSegment: pState->u16TxPktLen=%x\n",
3569 INSTANCE(pState), pState->u16TxPktLen));
3570 if (pState->u16HdrRemain > 0)
3571 {
3572 /* The header was not complete, check if it is now */
3573 if (u16Len >= pState->u16HdrRemain)
3574 {
3575 /* The rest is payload */
3576 u16Len -= pState->u16HdrRemain;
3577 pState->u16HdrRemain = 0;
3578 /* Save partial checksum and flags */
3579 pState->u32SavedCsum = pTcpHdr->chksum;
3580 pState->u16SavedFlags = pTcpHdr->hdrlen_flags;
3581 /* Clear FIN and PSH flags now and set them only in the last segment */
3582 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
3583 }
3584 else
3585 {
3586 /* Still not */
3587 pState->u16HdrRemain -= u16Len;
3588 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
3589 INSTANCE(pState), pState->u16HdrRemain));
3590 return rc;
3591 }
3592 }
3593
3594 pState->u32PayRemain -= u16Len;
3595
3596 if (fSend)
3597 {
3598 /* Leave ethernet header intact */
3599 /* IP Total Length = payload + headers - ethernet header */
3600 pIpHdr->total_len = htons(pState->u16TxPktLen - pState->contextTSE.ip.u8CSS);
3601 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
3602 INSTANCE(pState), ntohs(pIpHdr->total_len)));
3603 /* Update IP Checksum */
3604 pIpHdr->chksum = 0;
3605 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
3606 pState->contextTSE.ip.u8CSO,
3607 pState->contextTSE.ip.u8CSS,
3608 pState->contextTSE.ip.u16CSE);
3609
3610 /* Update TCP flags */
3611 /* Restore original FIN and PSH flags for the last segment */
3612 if (pState->u32PayRemain == 0)
3613 {
3614 pTcpHdr->hdrlen_flags = pState->u16SavedFlags;
3615 E1K_INC_CNT32(TSCTC);
3616 }
3617 /* Add TCP length to partial pseudo header sum */
3618 uint32_t csum = pState->u32SavedCsum
3619 + htons(pState->u16TxPktLen - pState->contextTSE.tu.u8CSS);
3620 while (csum >> 16)
3621 csum = (csum >> 16) + (csum & 0xFFFF);
3622 pTcpHdr->chksum = csum;
3623 /* Compute final checksum */
3624 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
3625 pState->contextTSE.tu.u8CSO,
3626 pState->contextTSE.tu.u8CSS,
3627 pState->contextTSE.tu.u16CSE);
3628
3629 /*
3630 * Transmit it.
3631 */
3632 if (pState->CTX_SUFF(pTxSg))
3633 {
3634 Assert(pState->u16TxPktLen <= pState->CTX_SUFF(pTxSg)->cbAvailable);
3635 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
3636 if (pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pState->aTxPacketFallback)
3637 memcpy(pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->aTxPacketFallback, pState->u16TxPktLen);
3638 pState->CTX_SUFF(pTxSg)->cbUsed = pState->u16TxPktLen;
3639 pState->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pState->u16TxPktLen;
3640 }
3641 e1kTransmitFrame(pState, fOnWorkerThread);
3642
3643 /* Update Sequence Number */
3644 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pState->u16TxPktLen
3645 - pState->contextTSE.dw3.u8HDRLEN);
3646 /* Increment IP identification */
3647 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
3648
3649 /* Allocate new buffer for the next segment. */
3650 if (pState->u32PayRemain)
3651 {
3652 pState->cbTxAlloc = RT_MIN(pState->u32PayRemain,
3653 pState->contextTSE.dw3.u16MSS)
3654 + pState->contextTSE.dw3.u8HDRLEN
3655 + (pState->fVTag ? 4 : 0);
3656 rc = e1kXmitAllocBuf(pState, false /* fGSO */);
3657 }
3658 }
3659
3660 return rc;
3661}
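/*
 * The checksum fix-up above relies on the guest having primed the TCP checksum
 * field with a pseudo-header sum that excludes the TCP length (the usual TSE
 * contract). For each emitted segment the device adds that segment's TCP
 * length and folds the carry, e.g. with hypothetical values:
 *
 *   saved pseudo-header sum          0xFFF0
 *   + TCP length of this segment     0x05C8   (1480 bytes)
 *   = 0x105B8  ->  fold carry  ->    0x05B9
 *
 * which then seeds the final one's complement sum computed by
 * e1kInsertChecksum over the TCP segment.
 */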
3662#endif /* E1K_WITH_TXD_CACHE */
3663
3664#ifndef E1K_WITH_TXD_CACHE
3665/**
3666 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
3667 * frame.
3668 *
3669 * We construct the frame in the fallback buffer first and then copy it to the SG
3670 * buffer before passing it down to the network driver code.
3671 *
3672 * @returns true if the frame should be transmitted, false if not.
3673 *
3674 * @param pState The device state structure.
3675 * @param pDesc Pointer to the descriptor to transmit.
3676 * @param cbFragment Length of descriptor's buffer.
3677 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3678 * @thread E1000_TX
3679 */
3680static bool e1kFallbackAddToFrame(E1KSTATE* pState, E1KTXDESC* pDesc, uint32_t cbFragment, bool fOnWorkerThread)
3681{
3682 PPDMSCATTERGATHER pTxSg = pState->CTX_SUFF(pTxSg);
3683 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
3684 Assert(pDesc->data.cmd.fTSE);
3685 Assert(!e1kXmitIsGsoBuf(pTxSg));
3686
3687 uint16_t u16MaxPktLen = pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw3.u16MSS;
3688 Assert(u16MaxPktLen != 0);
3689 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
3690
3691 /*
3692 * Carve out segments.
3693 */
3694 do
3695 {
3696 /* Calculate how many bytes we have left in this TCP segment */
3697 uint32_t cb = u16MaxPktLen - pState->u16TxPktLen;
3698 if (cb > cbFragment)
3699 {
3700 /* This descriptor fits completely into current segment */
3701 cb = cbFragment;
3702 e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
3703 }
3704 else
3705 {
3706 e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
3707 /*
3708 * Rewind the packet tail pointer to the beginning of payload,
3709 * so we continue writing right beyond the header.
3710 */
3711 pState->u16TxPktLen = pState->contextTSE.dw3.u8HDRLEN;
3712 }
3713
3714 pDesc->data.u64BufAddr += cb;
3715 cbFragment -= cb;
3716 } while (cbFragment > 0);
3717
3718 if (pDesc->data.cmd.fEOP)
3719 {
3720 /* End of packet, next segment will contain header. */
3721 if (pState->u32PayRemain != 0)
3722 E1K_INC_CNT32(TSCTFC);
3723 pState->u16TxPktLen = 0;
3724 e1kXmitFreeBuf(pState);
3725 }
3726
3727 return false;
3728}
3729#else /* E1K_WITH_TXD_CACHE */
3730/**
3731 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
3732 * frame.
3733 *
3734 * We construct the frame in the fallback buffer first and then copy it to the SG
3735 * buffer before passing it down to the network driver code.
3736 *
3737 * @returns error code
3738 *
3739 * @param pState The device state structure.
3740 * @param pDesc Pointer to the descriptor to transmit.
3742 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3743 * @thread E1000_TX
3744 */
3745static int e1kFallbackAddToFrame(E1KSTATE* pState, E1KTXDESC* pDesc, bool fOnWorkerThread)
3746{
3747 int rc = VINF_SUCCESS;
3748 PPDMSCATTERGATHER pTxSg = pState->CTX_SUFF(pTxSg);
3749 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
3750 Assert(pDesc->data.cmd.fTSE);
3751 Assert(!e1kXmitIsGsoBuf(pTxSg));
3752
3753 uint16_t u16MaxPktLen = pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw3.u16MSS;
3754 Assert(u16MaxPktLen != 0);
3755 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
3756
3757 /*
3758 * Carve out segments.
3759 */
3760 do
3761 {
3762 /* Calculate how many bytes we have left in this TCP segment */
3763 uint32_t cb = u16MaxPktLen - pState->u16TxPktLen;
3764 if (cb > pDesc->data.cmd.u20DTALEN)
3765 {
3766 /* This descriptor fits completely into current segment */
3767 cb = pDesc->data.cmd.u20DTALEN;
3768 rc = e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
3769 }
3770 else
3771 {
3772 rc = e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
3773 /*
3774 * Rewind the packet tail pointer to the beginning of payload,
3775 * so we continue writing right beyond the header.
3776 */
3777 pState->u16TxPktLen = pState->contextTSE.dw3.u8HDRLEN;
3778 }
3779
3780 pDesc->data.u64BufAddr += cb;
3781 pDesc->data.cmd.u20DTALEN -= cb;
3782 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
3783
3784 if (pDesc->data.cmd.fEOP)
3785 {
3786 /* End of packet, next segment will contain header. */
3787 if (pState->u32PayRemain != 0)
3788 E1K_INC_CNT32(TSCTFC);
3789 pState->u16TxPktLen = 0;
3790 e1kXmitFreeBuf(pState);
3791 }
3792
3793    return rc;
3794}
3795#endif /* E1K_WITH_TXD_CACHE */
3796
3797
3798/**
3799 * Add descriptor's buffer to transmit frame.
3800 *
3801 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
3802 * TSE frames we cannot handle as GSO.
3803 *
3804 * @returns true on success, false on failure.
3805 *
3806 * @param pThis The device state structure.
3807 * @param PhysAddr The physical address of the descriptor buffer.
3808 * @param cbFragment Length of descriptor's buffer.
3809 * @thread E1000_TX
3810 */
3811static bool e1kAddToFrame(E1KSTATE *pThis, RTGCPHYS PhysAddr, uint32_t cbFragment)
3812{
3813 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
3814 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
3815 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
3816
3817 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
3818 {
3819 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", INSTANCE(pThis), cbNewPkt, E1K_MAX_TX_PKT_SIZE));
3820 return false;
3821 }
3822 if (RT_UNLIKELY( fGso && cbNewPkt > pTxSg->cbAvailable ))
3823 {
3824 E1kLog(("%s Transmit packet is too large: %u > %u(max)/GSO\n", INSTANCE(pThis), cbNewPkt, pTxSg->cbAvailable));
3825 return false;
3826 }
3827
3828 if (RT_LIKELY(pTxSg))
3829 {
3830 Assert(pTxSg->cSegs == 1);
3831 Assert(pTxSg->cbUsed == pThis->u16TxPktLen);
3832
3833 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
3834 (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
3835
3836 pTxSg->cbUsed = cbNewPkt;
3837 }
3838 pThis->u16TxPktLen = cbNewPkt;
3839
3840 return true;
3841}
3842
3843
3844/**
3845 * Write the descriptor back to guest memory and notify the guest.
3846 *
3847 * @param pState The device state structure.
3848 * @param   pDesc       Pointer to the descriptor that has been transmitted.
3849 * @param addr Physical address of the descriptor in guest memory.
3850 * @thread E1000_TX
3851 */
3852static void e1kDescReport(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr)
3853{
3854 /*
3855 * We fake descriptor write-back bursting. Descriptors are written back as they are
3856 * processed.
3857 */
3858 /* Let's pretend we process descriptors. Write back with DD set. */
3859 /*
3860     * Prior to r71586 we tried to accommodate the case when write-back bursts
3861     * are enabled without actually implementing bursting by writing back all
3862     * descriptors, even the ones that do not have RS set. This caused kernel
3863     * panics with Linux SMP kernels, as the e1000 driver tried to free up the skb
3864     * associated with a written-back descriptor if it happened to be a context
3865     * descriptor, since context descriptors have no skb associated with them.
3866     * Starting with r71586 we write back only the descriptors with RS set,
3867     * which is slightly different from what the real hardware does in
3868     * case there is a chain of data descriptors where some of them have RS set
3869     * and others do not. It is a very uncommon scenario, imho.
3870 */
3871 if (pDesc->legacy.cmd.fRS)
3872 {
3873 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
3874 e1kWriteBackDesc(pState, pDesc, addr);
3875 if (pDesc->legacy.cmd.fEOP)
3876 {
3877#ifdef E1K_USE_TX_TIMERS
3878 if (pDesc->legacy.cmd.fIDE)
3879 {
3880 E1K_INC_ISTAT_CNT(pState->uStatTxIDE);
3881 //if (pState->fIntRaised)
3882 //{
3883 // /* Interrupt is already pending, no need for timers */
3884 // ICR |= ICR_TXDW;
3885 //}
3886 //else {
3887                /* Arm the timer to fire in TIDV usec (discard .024) */
3888 e1kArmTimer(pState, pState->CTX_SUFF(pTIDTimer), TIDV);
3889# ifndef E1K_NO_TAD
3890 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
3891 E1kLog2(("%s Checking if TAD timer is running\n",
3892 INSTANCE(pState)));
3893 if (TADV != 0 && !TMTimerIsActive(pState->CTX_SUFF(pTADTimer)))
3894 e1kArmTimer(pState, pState->CTX_SUFF(pTADTimer), TADV);
3895# endif /* E1K_NO_TAD */
3896 }
3897 else
3898 {
3899 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
3900 INSTANCE(pState)));
3901# ifndef E1K_NO_TAD
3902 /* Cancel both timers if armed and fire immediately. */
3903 e1kCancelTimer(pState, pState->CTX_SUFF(pTADTimer));
3904# endif /* E1K_NO_TAD */
3905#endif /* E1K_USE_TX_TIMERS */
3906 E1K_INC_ISTAT_CNT(pState->uStatIntTx);
3907 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXDW);
3908#ifdef E1K_USE_TX_TIMERS
3909 }
3910#endif /* E1K_USE_TX_TIMERS */
3911 }
3912 }
3913 else
3914 {
3915 E1K_INC_ISTAT_CNT(pState->uStatTxNoRS);
3916 }
3917}
3918
3919#ifndef E1K_WITH_TXD_CACHE
3920/**
3921 * Process Transmit Descriptor.
3922 *
3923 * E1000 supports three types of transmit descriptors:
3924 * - legacy data descriptors of older format (context-less).
3925 * - data the same as legacy but providing new offloading capabilities.
3926 * - context sets up the context for following data descriptors.
3927 *
3928 * @param pState The device state structure.
3929 * @param pDesc Pointer to descriptor union.
3930 * @param addr Physical address of descriptor in guest memory.
3931 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3932 * @thread E1000_TX
3933 */
3934static int e1kXmitDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr, bool fOnWorkerThread)
3935{
3936 int rc = VINF_SUCCESS;
3937 uint32_t cbVTag = 0;
3938
3939 e1kPrintTDesc(pState, pDesc, "vvv");
3940
3941#ifdef E1K_USE_TX_TIMERS
3942 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
3943#endif /* E1K_USE_TX_TIMERS */
3944
3945 switch (e1kGetDescType(pDesc))
3946 {
3947 case E1K_DTYP_CONTEXT:
3948 if (pDesc->context.dw2.fTSE)
3949 {
3950 pState->contextTSE = pDesc->context;
3951 pState->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
3952 pState->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
3953 e1kSetupGsoCtx(&pState->GsoCtx, &pDesc->context);
3954 STAM_COUNTER_INC(&pState->StatTxDescCtxTSE);
3955 }
3956 else
3957 {
3958 pState->contextNormal = pDesc->context;
3959 STAM_COUNTER_INC(&pState->StatTxDescCtxNormal);
3960 }
3961 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
3962 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", INSTANCE(pState),
3963 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
3964 pDesc->context.ip.u8CSS,
3965 pDesc->context.ip.u8CSO,
3966 pDesc->context.ip.u16CSE,
3967 pDesc->context.tu.u8CSS,
3968 pDesc->context.tu.u8CSO,
3969 pDesc->context.tu.u16CSE));
3970 E1K_INC_ISTAT_CNT(pState->uStatDescCtx);
3971 e1kDescReport(pState, pDesc, addr);
3972 break;
3973
3974 case E1K_DTYP_DATA:
3975 {
3976 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
3977 {
3978                E1kLog2(("%s Empty data descriptor, skipped.\n", INSTANCE(pState)));
3979 /** @todo Same as legacy when !TSE. See below. */
3980 break;
3981 }
3982 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
3983 &pState->StatTxDescTSEData:
3984 &pState->StatTxDescData);
3985 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
3986 E1K_INC_ISTAT_CNT(pState->uStatDescDat);
3987
3988 /*
3989 * The last descriptor of non-TSE packet must contain VLE flag.
3990             * The last descriptor of a non-TSE packet must contain the VLE flag.
3991             * TSE packets have the VLE flag in the first descriptor. The latter
3992             * case is taken care of a bit later when cbVTag gets assigned.
3993 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
3994 */
3995 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
3996 {
3997 pState->fVTag = pDesc->data.cmd.fVLE;
3998 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
3999 }
4000 /*
4001 * First fragment: Allocate new buffer and save the IXSM and TXSM
4002 * packet options as these are only valid in the first fragment.
4003 */
4004 if (pState->u16TxPktLen == 0)
4005 {
4006 pState->fIPcsum = pDesc->data.dw3.fIXSM;
4007 pState->fTCPcsum = pDesc->data.dw3.fTXSM;
4008 E1kLog2(("%s Saving checksum flags:%s%s; \n", INSTANCE(pState),
4009 pState->fIPcsum ? " IP" : "",
4010 pState->fTCPcsum ? " TCP/UDP" : ""));
4011 if (pDesc->data.cmd.fTSE)
4012 {
4013 /* 2) pDesc->data.cmd.fTSE && pState->u16TxPktLen == 0 */
4014 pState->fVTag = pDesc->data.cmd.fVLE;
4015 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
4016 cbVTag = pState->fVTag ? 4 : 0;
4017 }
4018 else if (pDesc->data.cmd.fEOP)
4019 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4020 else
4021 cbVTag = 4;
4022 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", INSTANCE(pState), cbVTag));
4023 if (e1kCanDoGso(&pState->GsoCtx, &pDesc->data, &pState->contextTSE))
4024 rc = e1kXmitAllocBuf(pState, pState->contextTSE.dw2.u20PAYLEN + pState->contextTSE.dw3.u8HDRLEN + cbVTag,
4025 true /*fExactSize*/, true /*fGso*/);
4026 else if (pDesc->data.cmd.fTSE)
4027 rc = e1kXmitAllocBuf(pState, pState->contextTSE.dw3.u16MSS + pState->contextTSE.dw3.u8HDRLEN + cbVTag,
4028 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4029 else
4030 rc = e1kXmitAllocBuf(pState, pDesc->data.cmd.u20DTALEN + cbVTag,
4031 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4032
4033 /**
4034 * @todo: Perhaps it is not that simple for GSO packets! We may
4035 * need to unwind some changes.
4036 */
4037 if (RT_FAILURE(rc))
4038 {
4039 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4040 break;
4041 }
4042                /** @todo Is there any way of indicating errors other than collisions? Like
4043 * VERR_NET_DOWN. */
4044 }
4045
4046 /*
4047 * Add the descriptor data to the frame. If the frame is complete,
4048 * transmit it and reset the u16TxPktLen field.
4049 */
4050 if (e1kXmitIsGsoBuf(pState->CTX_SUFF(pTxSg)))
4051 {
4052 STAM_COUNTER_INC(&pState->StatTxPathGSO);
4053 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4054 if (pDesc->data.cmd.fEOP)
4055 {
4056 if ( fRc
4057 && pState->CTX_SUFF(pTxSg)
4058 && pState->CTX_SUFF(pTxSg)->cbUsed == (size_t)pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN)
4059 {
4060 e1kTransmitFrame(pState, fOnWorkerThread);
4061 E1K_INC_CNT32(TSCTC);
4062 }
4063 else
4064 {
4065 if (fRc)
4066 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , INSTANCE(pState),
4067 pState->CTX_SUFF(pTxSg), pState->CTX_SUFF(pTxSg) ? pState->CTX_SUFF(pTxSg)->cbUsed : 0,
4068 pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN));
4069 e1kXmitFreeBuf(pState);
4070 E1K_INC_CNT32(TSCTFC);
4071 }
4072 pState->u16TxPktLen = 0;
4073 }
4074 }
4075 else if (!pDesc->data.cmd.fTSE)
4076 {
4077 STAM_COUNTER_INC(&pState->StatTxPathRegular);
4078 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4079 if (pDesc->data.cmd.fEOP)
4080 {
4081 if (fRc && pState->CTX_SUFF(pTxSg))
4082 {
4083 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
4084 if (pState->fIPcsum)
4085 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4086 pState->contextNormal.ip.u8CSO,
4087 pState->contextNormal.ip.u8CSS,
4088 pState->contextNormal.ip.u16CSE);
4089 if (pState->fTCPcsum)
4090 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4091 pState->contextNormal.tu.u8CSO,
4092 pState->contextNormal.tu.u8CSS,
4093 pState->contextNormal.tu.u16CSE);
4094 e1kTransmitFrame(pState, fOnWorkerThread);
4095 }
4096 else
4097 e1kXmitFreeBuf(pState);
4098 pState->u16TxPktLen = 0;
4099 }
4100 }
4101 else
4102 {
4103 STAM_COUNTER_INC(&pState->StatTxPathFallback);
4104 e1kFallbackAddToFrame(pState, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4105 }
4106
4107 e1kDescReport(pState, pDesc, addr);
4108 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4109 break;
4110 }
4111
4112 case E1K_DTYP_LEGACY:
4113 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4114 {
4115 E1kLog(("%s Empty legacy descriptor, skipped.\n", INSTANCE(pState)));
4116 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4117 break;
4118 }
4119 STAM_COUNTER_INC(&pState->StatTxDescLegacy);
4120 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4121
4122 /* First fragment: allocate new buffer. */
4123 if (pState->u16TxPktLen == 0)
4124 {
4125 if (pDesc->legacy.cmd.fEOP)
4126 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4127 else
4128 cbVTag = 4;
4129 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", INSTANCE(pState), cbVTag));
4130 /** @todo reset status bits? */
4131 rc = e1kXmitAllocBuf(pState, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4132 if (RT_FAILURE(rc))
4133 {
4134 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4135 break;
4136 }
4137
4138                /** @todo Is there any way of indicating errors other than collisions? Like
4139 * VERR_NET_DOWN. */
4140 }
4141
4142 /* Add fragment to frame. */
4143 if (e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4144 {
4145 E1K_INC_ISTAT_CNT(pState->uStatDescLeg);
4146
4147 /* Last fragment: Transmit and reset the packet storage counter. */
4148 if (pDesc->legacy.cmd.fEOP)
4149 {
4150 pState->fVTag = pDesc->legacy.cmd.fVLE;
4151 pState->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4152 /** @todo Offload processing goes here. */
4153 e1kTransmitFrame(pState, fOnWorkerThread);
4154 pState->u16TxPktLen = 0;
4155 }
4156 }
4157 /* Last fragment + failure: free the buffer and reset the storage counter. */
4158 else if (pDesc->legacy.cmd.fEOP)
4159 {
4160 e1kXmitFreeBuf(pState);
4161 pState->u16TxPktLen = 0;
4162 }
4163
4164 e1kDescReport(pState, pDesc, addr);
4165 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4166 break;
4167
4168 default:
4169 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4170 INSTANCE(pState), e1kGetDescType(pDesc)));
4171 break;
4172 }
4173
4174 return rc;
4175}
4176#else /* E1K_WITH_TXD_CACHE */
4177/**
4178 * Process Transmit Descriptor.
4179 *
4180 * E1000 supports three types of transmit descriptors:
4181 * - legacy data descriptors of older format (context-less).
4182 * - data the same as legacy but providing new offloading capabilities.
4183 * - context sets up the context for following data descriptors.
4184 *
4185 * @param pState The device state structure.
4186 * @param pDesc Pointer to descriptor union.
4187 * @param addr Physical address of descriptor in guest memory.
4188 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4190 * @thread E1000_TX
4191 */
4192static int e1kXmitDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr,
4193 bool fOnWorkerThread)
4194{
4195 int rc = VINF_SUCCESS;
4196 uint32_t cbVTag = 0;
4197
4198 e1kPrintTDesc(pState, pDesc, "vvv");
4199
4200#ifdef E1K_USE_TX_TIMERS
4201 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
4202#endif /* E1K_USE_TX_TIMERS */
4203
4204 switch (e1kGetDescType(pDesc))
4205 {
4206 case E1K_DTYP_CONTEXT:
4207            /* The caller has already updated the context */
4208 E1K_INC_ISTAT_CNT(pState->uStatDescCtx);
4209 e1kDescReport(pState, pDesc, addr);
4210 break;
4211
4212 case E1K_DTYP_DATA:
4213 {
4214 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4215 {
4216                E1kLog2(("%s Empty data descriptor, skipped.\n", INSTANCE(pState)));
4217 /** @todo Same as legacy when !TSE. See below. */
4218 break;
4219 }
4220 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4221 &pState->StatTxDescTSEData:
4222 &pState->StatTxDescData);
4223 E1K_INC_ISTAT_CNT(pState->uStatDescDat);
4224
4225 /*
4226 * Add the descriptor data to the frame. If the frame is complete,
4227 * transmit it and reset the u16TxPktLen field.
4228 */
4229 if (e1kXmitIsGsoBuf(pState->CTX_SUFF(pTxSg)))
4230 {
4231 STAM_COUNTER_INC(&pState->StatTxPathGSO);
4232 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4233 if (pDesc->data.cmd.fEOP)
4234 {
4235 if ( fRc
4236 && pState->CTX_SUFF(pTxSg)
4237 && pState->CTX_SUFF(pTxSg)->cbUsed == (size_t)pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN)
4238 {
4239 e1kTransmitFrame(pState, fOnWorkerThread);
4240 E1K_INC_CNT32(TSCTC);
4241 }
4242 else
4243 {
4244 if (fRc)
4245 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , INSTANCE(pState),
4246 pState->CTX_SUFF(pTxSg), pState->CTX_SUFF(pTxSg) ? pState->CTX_SUFF(pTxSg)->cbUsed : 0,
4247 pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN));
4248 e1kXmitFreeBuf(pState);
4249 E1K_INC_CNT32(TSCTFC);
4250 }
4251 pState->u16TxPktLen = 0;
4252 }
4253 }
4254 else if (!pDesc->data.cmd.fTSE)
4255 {
4256 STAM_COUNTER_INC(&pState->StatTxPathRegular);
4257 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4258 if (pDesc->data.cmd.fEOP)
4259 {
4260 if (fRc && pState->CTX_SUFF(pTxSg))
4261 {
4262 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
4263 if (pState->fIPcsum)
4264 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4265 pState->contextNormal.ip.u8CSO,
4266 pState->contextNormal.ip.u8CSS,
4267 pState->contextNormal.ip.u16CSE);
4268 if (pState->fTCPcsum)
4269 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4270 pState->contextNormal.tu.u8CSO,
4271 pState->contextNormal.tu.u8CSS,
4272 pState->contextNormal.tu.u16CSE);
4273 e1kTransmitFrame(pState, fOnWorkerThread);
4274 }
4275 else
4276 e1kXmitFreeBuf(pState);
4277 pState->u16TxPktLen = 0;
4278 }
4279 }
4280 else
4281 {
4282 STAM_COUNTER_INC(&pState->StatTxPathFallback);
4283 rc = e1kFallbackAddToFrame(pState, pDesc, fOnWorkerThread);
4284 }
4285
4286 e1kDescReport(pState, pDesc, addr);
4287 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4288 break;
4289 }
4290
4291 case E1K_DTYP_LEGACY:
4292 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4293 {
4294 E1kLog(("%s Empty legacy descriptor, skipped.\n", INSTANCE(pState)));
4295 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4296 break;
4297 }
4298 STAM_COUNTER_INC(&pState->StatTxDescLegacy);
4299 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4300
4301 /* Add fragment to frame. */
4302 if (e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4303 {
4304 E1K_INC_ISTAT_CNT(pState->uStatDescLeg);
4305
4306 /* Last fragment: Transmit and reset the packet storage counter. */
4307 if (pDesc->legacy.cmd.fEOP)
4308 {
4309 if (pDesc->legacy.cmd.fIC)
4310 {
4311 e1kInsertChecksum(pState,
4312 (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
4313 pState->u16TxPktLen,
4314 pDesc->legacy.cmd.u8CSO,
4315 pDesc->legacy.dw3.u8CSS,
4316 0);
4317 }
4318 e1kTransmitFrame(pState, fOnWorkerThread);
4319 pState->u16TxPktLen = 0;
4320 }
4321 }
4322 /* Last fragment + failure: free the buffer and reset the storage counter. */
4323 else if (pDesc->legacy.cmd.fEOP)
4324 {
4325 e1kXmitFreeBuf(pState);
4326 pState->u16TxPktLen = 0;
4327 }
4328
4329 e1kDescReport(pState, pDesc, addr);
4330 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4331 break;
4332
4333 default:
4334 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4335 INSTANCE(pState), e1kGetDescType(pDesc)));
4336 break;
4337 }
4338
4339 return rc;
4340}
4341
4342
4343DECLINLINE(void) e1kUpdateTxContext(E1KSTATE* pState, E1KTXDESC* pDesc)
4344{
4345 if (pDesc->context.dw2.fTSE)
4346 {
4347 pState->contextTSE = pDesc->context;
4348 pState->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4349 pState->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4350 e1kSetupGsoCtx(&pState->GsoCtx, &pDesc->context);
4351 STAM_COUNTER_INC(&pState->StatTxDescCtxTSE);
4352 }
4353 else
4354 {
4355 pState->contextNormal = pDesc->context;
4356 STAM_COUNTER_INC(&pState->StatTxDescCtxNormal);
4357 }
4358 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4359 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", INSTANCE(pState),
4360 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4361 pDesc->context.ip.u8CSS,
4362 pDesc->context.ip.u8CSO,
4363 pDesc->context.ip.u16CSE,
4364 pDesc->context.tu.u8CSS,
4365 pDesc->context.tu.u8CSO,
4366 pDesc->context.tu.u16CSE));
4367}
4368
4369
4370static bool e1kLocateTxPacket(E1KSTATE *pState)
4371{
4372 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
4373 INSTANCE(pState), pState->cbTxAlloc));
4374 /* Check if we have located the packet already. */
4375 if (pState->cbTxAlloc)
4376 {
4377 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4378 INSTANCE(pState), pState->cbTxAlloc));
4379 return true;
4380 }
4381
4382 bool fTSE = false;
4383 uint32_t cbPacket = 0;
4384
4385 for (int i = pState->iTxDCurrent; i < pState->nTxDFetched; ++i)
4386 {
4387 E1KTXDESC *pDesc = &pState->aTxDescriptors[i];
4388 switch (e1kGetDescType(pDesc))
4389 {
4390 case E1K_DTYP_CONTEXT:
4391 e1kUpdateTxContext(pState, pDesc);
4392 continue;
4393 case E1K_DTYP_LEGACY:
4394 cbPacket += pDesc->legacy.cmd.u16Length;
4395 pState->fGSO = false;
4396 break;
4397 case E1K_DTYP_DATA:
4398 if (cbPacket == 0)
4399 {
4400 /*
4401 * The first fragment: save IXSM and TXSM options
4402 * as these are only valid in the first fragment.
4403 */
4404 pState->fIPcsum = pDesc->data.dw3.fIXSM;
4405 pState->fTCPcsum = pDesc->data.dw3.fTXSM;
4406 fTSE = pDesc->data.cmd.fTSE;
4407 /*
4408 * TSE descriptors have VLE bit properly set in
4409 * the first fragment.
4410 */
4411 if (fTSE)
4412 {
4413 pState->fVTag = pDesc->data.cmd.fVLE;
4414 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
4415 }
4416 pState->fGSO = e1kCanDoGso(&pState->GsoCtx, &pDesc->data, &pState->contextTSE);
4417 }
4418 cbPacket += pDesc->data.cmd.u20DTALEN;
4419 break;
4420 default:
4421 AssertMsgFailed(("Impossible descriptor type!"));
4422 }
4423 if (pDesc->legacy.cmd.fEOP)
4424 {
4425 /*
4426 * Non-TSE descriptors have VLE bit properly set in
4427 * the last fragment.
4428 */
4429 if (!fTSE)
4430 {
4431 pState->fVTag = pDesc->data.cmd.fVLE;
4432 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
4433 }
4434 /*
4435 * Compute the required buffer size. If we cannot do GSO but still
4436 * have to do segmentation we allocate the first segment only.
4437 */
4438 pState->cbTxAlloc = (!fTSE || pState->fGSO) ?
4439 cbPacket :
4440 RT_MIN(cbPacket, pState->contextTSE.dw3.u16MSS + pState->contextTSE.dw3.u8HDRLEN);
4441 if (pState->fVTag)
4442 pState->cbTxAlloc += 4;
4443 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4444 INSTANCE(pState), pState->cbTxAlloc));
4445 return true;
4446 }
4447 }
4448
4449 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d\n",
4450 INSTANCE(pState), pState->cbTxAlloc));
4451 return false;
4452}
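/*
 * For illustration, a hypothetical example of the buffer size computation
 * above. For a TSE packet with PAYLEN = 8000, HDRLEN = 54, MSS = 1460 and no
 * VLAN tag (so cbPacket = 8054):
 *
 *   - GSO possible:     cbTxAlloc = cbPacket             = 8054, one large
 *                       buffer, segmentation is left to the GSO machinery;
 *   - GSO not possible: cbTxAlloc = min(8054, 1460 + 54) = 1514, only the
 *                       first segment is allocated, the rest follow one by one.
 *
 * With VLE set, 4 bytes are added for the VLAN tag in either case.
 */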
4453
4454
4455static int e1kXmitPacket(E1KSTATE *pState, bool fOnWorkerThread)
4456{
4457 int rc = VINF_SUCCESS;
4458
4459 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
4460 INSTANCE(pState), pState->iTxDCurrent, pState->nTxDFetched));
4461
4462 while (pState->iTxDCurrent < pState->nTxDFetched)
4463 {
4464 E1KTXDESC *pDesc = &pState->aTxDescriptors[pState->iTxDCurrent];
4465 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
4466 INSTANCE(pState), TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
4467 rc = e1kXmitDesc(pState, pDesc,
4468 ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(E1KTXDESC),
4469 fOnWorkerThread);
4470 if (RT_FAILURE(rc))
4471 break;
4472 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
4473 TDH = 0;
4474 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
4475 if (uLowThreshold != 0 && e1kGetTxLen(pState) <= uLowThreshold)
4476 {
4477 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
4478 INSTANCE(pState), e1kGetTxLen(pState), GET_BITS(TXDCTL, LWTHRESH)*8));
4479 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXD_LOW);
4480 }
4481 ++pState->iTxDCurrent;
4482 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
4483 break;
4484 }
4485
4486 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
4487 INSTANCE(pState), rc, pState->iTxDCurrent, pState->nTxDFetched));
4488 return rc;
4489}
4490#endif /* E1K_WITH_TXD_CACHE */
4491
4492#ifndef E1K_WITH_TXD_CACHE
4493/**
4494 * Transmit pending descriptors.
4495 *
4496 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
4497 *
4498 * @param pState The E1000 state.
4499 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
4500 */
4501static int e1kXmitPending(E1KSTATE *pState, bool fOnWorkerThread)
4502{
4503 int rc = VINF_SUCCESS;
4504
4505 /*
4506 * Grab the xmit lock of the driver as well as the E1K device state.
4507 */
4508 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
4509 if (pDrv)
4510 {
4511 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
4512 if (RT_FAILURE(rc))
4513 return rc;
4514 }
4515 /*
4516 * Process all pending descriptors.
4517 * Note! Do not process descriptors in locked state
4518 */
4519 while (TDH != TDT && !pState->fLocked)
4520 {
4521 E1KTXDESC desc;
4522 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
4523 INSTANCE(pState), TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
4524
4525 e1kLoadDesc(pState, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
4526 rc = e1kXmitDesc(pState, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc), fOnWorkerThread);
4527 /* If we failed to transmit descriptor we will try it again later */
4528 if (RT_FAILURE(rc))
4529 break;
4530 if (++TDH * sizeof(desc) >= TDLEN)
4531 TDH = 0;
4532
4533 if (e1kGetTxLen(pState) <= GET_BITS(TXDCTL, LWTHRESH)*8)
4534 {
4535 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
4536 INSTANCE(pState), e1kGetTxLen(pState), GET_BITS(TXDCTL, LWTHRESH)*8));
4537 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXD_LOW);
4538 }
4539
4540 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4541 }
4542
4543 /// @todo: uncomment: pState->uStatIntTXQE++;
4544 /// @todo: uncomment: e1kRaiseInterrupt(pState, ICR_TXQE);
4545
4546 /*
4547 * Release the lock.
4548 */
4549 if (pDrv)
4550 pDrv->pfnEndXmit(pDrv);
4551 return rc;
4552}
4553#else /* E1K_WITH_TXD_CACHE */
4554/**
4555 * Transmit pending descriptors.
4556 *
4557 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
4558 *
4559 * @param pState The E1000 state.
4560 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
4561 */
4562static int e1kXmitPending(E1KSTATE *pState, bool fOnWorkerThread)
4563{
4564 int rc = VINF_SUCCESS;
4565
4566 /*
4567 * Grab the xmit lock of the driver as well as the E1K device state.
4568 */
4569 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
4570 if (pDrv)
4571 {
4572 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
4573 if (RT_FAILURE(rc))
4574 return rc;
4575 }
4576
4577 /*
4578 * Process all pending descriptors.
4579 * Note! Do not process descriptors in locked state
4580 */
4581 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4582 while (!pState->fLocked && e1kTxDLazyLoad(pState))
4583 {
4584 while (e1kLocateTxPacket(pState))
4585 {
4586 /* Found a complete packet, allocate it. */
4587 rc = e1kXmitAllocBuf(pState, pState->fGSO);
4588 /* If we're out of bandwidth we'll come back later. */
4589 if (RT_FAILURE(rc))
4590 goto out;
4591 /* Copy the packet to allocated buffer and send it. */
4592 rc = e1kXmitPacket(pState, fOnWorkerThread);
4593 /* If we're out of bandwidth we'll come back later. */
4594 if (RT_FAILURE(rc))
4595 goto out;
4596 }
4597 uint8_t u8Remain = pState->nTxDFetched - pState->iTxDCurrent;
4598 if (u8Remain > 0)
4599 {
4600 /*
4601                 * A packet was partially fetched. Move the incomplete packet to
4602                 * the beginning of the cache buffer, then load more descriptors.
4603 */
4604 memmove(pState->aTxDescriptors,
4605 &pState->aTxDescriptors[pState->iTxDCurrent],
4606 u8Remain * sizeof(E1KTXDESC));
4607 pState->nTxDFetched = u8Remain;
4608 e1kTxDLoadMore(pState);
4609 }
4610 else
4611 pState->nTxDFetched = 0;
4612 pState->iTxDCurrent = 0;
4613 }
4614 if (!pState->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
4615 {
4616 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
4617 INSTANCE(pState)));
4618 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXD_LOW);
4619 }
4620
4621out:
4622 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4623
4624 /// @todo: uncomment: pState->uStatIntTXQE++;
4625 /// @todo: uncomment: e1kRaiseInterrupt(pState, ICR_TXQE);
4626
4627 /*
4628 * Release the lock.
4629 */
4630 if (pDrv)
4631 pDrv->pfnEndXmit(pDrv);
4632 return rc;
4633}
4634#endif /* E1K_WITH_TXD_CACHE */
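/*
 * Illustration of the descriptor-cache refill in the variant of
 * e1kXmitPending() above that is compiled with E1K_WITH_TXD_CACHE (made-up
 * numbers): suppose e1kTxDLazyLoad() fetched 8 descriptors (nTxDFetched = 8)
 * and e1kLocateTxPacket() consumed complete packets up to iTxDCurrent = 5,
 * leaving 3 descriptors that belong to a packet whose tail the guest has not
 * written yet. The memmove() shifts those 3 descriptors to
 * aTxDescriptors[0..2], nTxDFetched becomes 3, and e1kTxDLoadMore() appends
 * freshly fetched descriptors behind them, so a partially assembled packet is
 * never torn apart across refills.
 */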
4635
4636#ifdef IN_RING3
4637
4638/**
4639 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
4640 */
4641static DECLCALLBACK(void) e1kNetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
4642{
4643 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
4644 /* Resume suspended transmission */
4645 STATUS &= ~STATUS_TXOFF;
4646 e1kXmitPending(pState, true /*fOnWorkerThread*/);
4647}
4648
4649/**
4650 * Callback for consuming from transmit queue. It gets called in R3 whenever
4651 * we enqueue something in R0/GC.
4652 *
4653 * @returns true
4654 * @param pDevIns Pointer to device instance structure.
4655 * @param pItem Pointer to the element being dequeued (not used).
4656 * @thread ???
4657 */
4658static DECLCALLBACK(bool) e1kTxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
4659{
4660 NOREF(pItem);
4661 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
4662 E1kLog2(("%s e1kTxQueueConsumer:\n", INSTANCE(pState)));
4663
4664 int rc = e1kXmitPending(pState, false /*fOnWorkerThread*/);
4665 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
4666
4667 return true;
4668}
4669
4670/**
4671 * Handler for the wakeup signaller queue.
4672 */
4673static DECLCALLBACK(bool) e1kCanRxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
4674{
4675 e1kWakeupReceive(pDevIns);
4676 return true;
4677}
4678
4679#endif /* IN_RING3 */
4680
4681/**
4682 * Write handler for Transmit Descriptor Tail register.
4683 *
4684 * @param pState The device state structure.
4685 * @param offset Register offset in memory-mapped frame.
4686 * @param index Register index in register array.
4687 * @param value The value to store.
4689 * @thread EMT
4690 */
4691static int e1kRegWriteTDT(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
4692{
4693 int rc = e1kCsTxEnter(pState, VINF_IOM_R3_MMIO_WRITE);
4694 if (RT_UNLIKELY(rc != VINF_SUCCESS))
4695 return rc;
4696 rc = e1kRegWriteDefault(pState, offset, index, value);
4697
4698 /* All descriptors starting with head and not including tail belong to us. */
4699 /* Process them. */
4700 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
4701 INSTANCE(pState), TDBAL, TDBAH, TDLEN, TDH, TDT));
4702
4703 /* Ignore TDT writes when the link is down. */
4704 if (TDH != TDT && (STATUS & STATUS_LU))
4705 {
4706 E1kLogRel(("E1000: TDT write: %d descriptors to process\n", e1kGetTxLen(pState)));
4707 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process, waking up E1000_TX thread\n",
4708 INSTANCE(pState), e1kGetTxLen(pState)));
4709 e1kCsTxLeave(pState);
4710
4711 /* Transmit pending packets if possible, defer it if we cannot do it
4712 in the current context. */
4713# ifndef IN_RING3
4714 if (!pState->CTX_SUFF(pDrv))
4715 {
4716 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pState->CTX_SUFF(pTxQueue));
4717 if (RT_LIKELY(pItem))
4718 PDMQueueInsert(pState->CTX_SUFF(pTxQueue), pItem);
4719 }
4720 else
4721# endif
4722 {
4723 rc = e1kXmitPending(pState, false /*fOnWorkerThread*/);
4724 if (rc == VERR_TRY_AGAIN)
4725 rc = VINF_SUCCESS;
4726 AssertRC(rc);
4727 }
4728 }
4729 else
4730 e1kCsTxLeave(pState);
4731
4732 return rc;
4733}
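/*
 * Flow note for the TDT write handler above: when the write is intercepted
 * in R0 or RC and the network connector has no counterpart in that context
 * (CTX_SUFF(pDrv) is NULL there), transmission cannot start immediately.
 * Queueing a dummy item on pTxQueue makes PDM invoke e1kTxQueueConsumer() on
 * the ring-3 side, which then calls e1kXmitPending(). In ring-3, or when an
 * R0/RC-capable connector is present, e1kXmitPending() is called directly
 * from the handler.
 */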
4734
4735/**
4736 * Write handler for Multicast Table Array registers.
4737 *
4738 * @param pState The device state structure.
4739 * @param offset Register offset in memory-mapped frame.
4740 * @param index Register index in register array.
4741 * @param value The value to store.
4742 * @thread EMT
4743 */
4744static int e1kRegWriteMTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
4745{
4746 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->auMTA), VERR_DEV_IO_ERROR);
4747 pState->auMTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auMTA[0])] = value;
4748
4749 return VINF_SUCCESS;
4750}
4751
4752/**
4753 * Read handler for Multicast Table Array registers.
4754 *
4755 * @returns VBox status code.
4756 *
4757 * @param pState The device state structure.
4758 * @param offset Register offset in memory-mapped frame.
4759 * @param index Register index in register array.
4760 * @thread EMT
4761 */
4762static int e1kRegReadMTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
4763{
4764 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->auMTA), VERR_DEV_IO_ERROR);
4765 *pu32Value = pState->auMTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auMTA[0])];
4766
4767 return VINF_SUCCESS;
4768}
4769
4770/**
4771 * Write handler for Receive Address registers.
4772 *
4773 * @param pState The device state structure.
4774 * @param offset Register offset in memory-mapped frame.
4775 * @param index Register index in register array.
4776 * @param value The value to store.
4777 * @thread EMT
4778 */
4779static int e1kRegWriteRA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
4780{
4781 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->aRecAddr.au32), VERR_DEV_IO_ERROR);
4782 pState->aRecAddr.au32[(offset - s_e1kRegMap[index].offset)/sizeof(pState->aRecAddr.au32[0])] = value;
4783
4784 return VINF_SUCCESS;
4785}
4786
4787/**
4788 * Read handler for Receive Address registers.
4789 *
4790 * @returns VBox status code.
4791 *
4792 * @param pState The device state structure.
4793 * @param offset Register offset in memory-mapped frame.
4794 * @param index Register index in register array.
4795 * @thread EMT
4796 */
4797static int e1kRegReadRA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
4798{
4799 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->aRecAddr.au32), VERR_DEV_IO_ERROR);
4800 *pu32Value = pState->aRecAddr.au32[(offset - s_e1kRegMap[index].offset)/sizeof(pState->aRecAddr.au32[0])];
4801
4802 return VINF_SUCCESS;
4803}
4804
4805/**
4806 * Write handler for VLAN Filter Table Array registers.
4807 *
4808 * @param pState The device state structure.
4809 * @param offset Register offset in memory-mapped frame.
4810 * @param index Register index in register array.
4811 * @param value The value to store.
4812 * @thread EMT
4813 */
4814static int e1kRegWriteVFTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
4815{
4816 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->auVFTA), VERR_DEV_IO_ERROR);
4817 pState->auVFTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auVFTA[0])] = value;
4818
4819 return VINF_SUCCESS;
4820}
4821
4822/**
4823 * Read handler for VLAN Filter Table Array registers.
4824 *
4825 * @returns VBox status code.
4826 *
4827 * @param pState The device state structure.
4828 * @param offset Register offset in memory-mapped frame.
4829 * @param index Register index in register array.
4830 * @thread EMT
4831 */
4832static int e1kRegReadVFTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
4833{
4834 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->auVFTA), VERR_DEV_IO_ERROR);
4835 *pu32Value = pState->auVFTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auVFTA[0])];
4836
4837 return VINF_SUCCESS;
4838}
4839
4840/**
4841 * Read handler for unimplemented registers.
4842 *
4843 * Merely reports reads from unimplemented registers.
4844 *
4845 * @returns VBox status code.
4846 *
4847 * @param pState The device state structure.
4848 * @param offset Register offset in memory-mapped frame.
4849 * @param index Register index in register array.
4850 * @thread EMT
4851 */
4852
4853static int e1kRegReadUnimplemented(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
4854{
4855 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
4856 INSTANCE(pState), offset, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
4857 *pu32Value = 0;
4858
4859 return VINF_SUCCESS;
4860}
4861
4862/**
4863 * Default register read handler with automatic clear operation.
4864 *
4865 * Retrieves the value of register from register array in device state structure.
4866 * Then resets all bits.
4867 *
4868 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
4869 * done in the caller.
4870 *
4871 * @returns VBox status code.
4872 *
4873 * @param pState The device state structure.
4874 * @param offset Register offset in memory-mapped frame.
4875 * @param index Register index in register array.
4876 * @thread EMT
4877 */
4878
4879static int e1kRegReadAutoClear(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
4880{
4881 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
4882 int rc = e1kRegReadDefault(pState, offset, index, pu32Value);
4883 pState->auRegs[index] = 0;
4884
4885 return rc;
4886}
4887
4888/**
4889 * Default register read handler.
4890 *
4891 * Retrieves the value of register from register array in device state structure.
4892 * Bits corresponding to 0s in 'readable' mask will always read as 0s.
4893 *
4894 * @remarks The 'mask' parameter is simply ignored as masking and shifting is
4895 * done in the caller.
4896 *
4897 * @returns VBox status code.
4898 *
4899 * @param pState The device state structure.
4900 * @param offset Register offset in memory-mapped frame.
4901 * @param index Register index in register array.
4902 * @thread EMT
4903 */
4904
4905static int e1kRegReadDefault(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
4906{
4907 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
4908 *pu32Value = pState->auRegs[index] & s_e1kRegMap[index].readable;
4909
4910 return VINF_SUCCESS;
4911}
4912
4913/**
4914 * Write handler for unimplemented registers.
4915 *
4916 * Merely reports writes to unimplemented registers.
4917 *
4918 * @param pState The device state structure.
4919 * @param offset Register offset in memory-mapped frame.
4920 * @param index Register index in register array.
4921 * @param value The value to store.
4922 * @thread EMT
4923 */
4924
4925static int e1kRegWriteUnimplemented(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
4926{
4927 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
4928 INSTANCE(pState), offset, value, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
4929
4930 return VINF_SUCCESS;
4931}
4932
4933/**
4934 * Default register write handler.
4935 *
4936 * Stores the value to the register array in device state structure. Only bits
4937 * corresponding to 1s both in 'writable' and 'mask' will be stored.
4938 *
4939 * @returns VBox status code.
4940 *
4941 * @param pState The device state structure.
4942 * @param offset Register offset in memory-mapped frame.
4943 * @param index Register index in register array.
4944 * @param value The value to store.
4946 * @thread EMT
4947 */
4948
4949static int e1kRegWriteDefault(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
4950{
4951 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
4952 pState->auRegs[index] = (value & s_e1kRegMap[index].writable) |
4953 (pState->auRegs[index] & ~s_e1kRegMap[index].writable);
4954
4955 return VINF_SUCCESS;
4956}
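/*
 * Worked example for the read-modify-write above (values chosen purely for
 * illustration): if a register's 'writable' mask is 0x0000FFFF, its current
 * contents are 0xABCD1234 and the guest writes 0x5555AAAA, the stored result
 * is (0x5555AAAA & 0x0000FFFF) | (0xABCD1234 & ~0x0000FFFF)
 * = 0x0000AAAA | 0xABCD0000 = 0xABCDAAAA, i.e. read-only bits retain their
 * previous values.
 */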
4957
4958/**
4959 * Search register table for matching register.
4960 *
4961 * @returns Index in the register table or -1 if not found.
4962 *
4963 * @param pState The device state structure.
4964 * @param uOffset Register offset in memory-mapped region.
4965 * @thread EMT
4966 */
4967static int e1kRegLookup(E1KSTATE *pState, uint32_t uOffset)
4968{
4969 int index;
4970
4971 for (index = 0; index < E1K_NUM_OF_REGS; index++)
4972 {
4973 if (s_e1kRegMap[index].offset <= uOffset && uOffset < s_e1kRegMap[index].offset + s_e1kRegMap[index].size)
4974 {
4975 return index;
4976 }
4977 }
4978
4979 return -1;
4980}
4981
4982/**
4983 * Handle register read operation.
4984 *
4985 * Looks up and calls appropriate handler.
4986 *
4987 * @returns VBox status code.
4988 *
4989 * @param pState The device state structure.
4990 * @param uOffset Register offset in memory-mapped frame.
4991 * @param pv Where to store the result.
4992 * @param cb Number of bytes to read.
4993 * @thread EMT
4994 */
4995static int e1kRegRead(E1KSTATE *pState, uint32_t uOffset, void *pv, uint32_t cb)
4996{
4997 uint32_t u32 = 0;
4998 uint32_t mask = 0;
4999 uint32_t shift;
5000 int rc = VINF_SUCCESS;
5001 int index = e1kRegLookup(pState, uOffset);
5002 const char *szInst = INSTANCE(pState);
5003#ifdef DEBUG
5004 char buf[9];
5005#endif
5006
5007 /*
5008 * From the spec:
5009 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5010 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5011 */
5012
5013 /*
5014 * To be able to write bytes and short word we convert them
5015 * to properly shifted 32-bit words and masks. The idea is
5016 * to keep register-specific handlers simple. Most accesses
5017 * will be 32-bit anyway.
5018 */
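/*
 * Example (illustrative numbers): a 2-byte read two bytes past the start of
 * a register begins with mask = 0x0000FFFF; the shift computed below is
 * 2 * 8 = 16, turning the mask into 0xFFFF0000. The register handler returns
 * the full 32-bit value, the mask keeps only the upper half, and the final
 * right shift by 16 moves those bytes to the bottom of the result, which is
 * what the 16-bit access expects.
 */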
5019 switch (cb)
5020 {
5021 case 1: mask = 0x000000FF; break;
5022 case 2: mask = 0x0000FFFF; break;
5023 case 4: mask = 0xFFFFFFFF; break;
5024 default:
5025 return PDMDevHlpDBGFStop(pState->CTX_SUFF(pDevIns), RT_SRC_POS,
5026 "%s e1kRegRead: unsupported op size: offset=%#10x cb=%#10x\n",
5027 szInst, uOffset, cb);
5028 }
5029 if (index != -1)
5030 {
5031 if (s_e1kRegMap[index].readable)
5032 {
5033 /* Make the mask correspond to the bits we are about to read. */
5034 shift = (uOffset - s_e1kRegMap[index].offset) % sizeof(uint32_t) * 8;
5035 mask <<= shift;
5036 if (!mask)
5037 return PDMDevHlpDBGFStop(pState->CTX_SUFF(pDevIns), RT_SRC_POS,
5038 "%s e1kRegRead: Zero mask: offset=%#10x cb=%#10x\n",
5039 szInst, uOffset, cb);
5040 /*
5041 * Read it. Pass the mask so the handler knows what has to be read.
5042 * Mask out irrelevant bits.
5043 */
5044 //rc = e1kCsEnter(pState, VERR_SEM_BUSY, RT_SRC_POS);
5045 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5046 return rc;
5047 //pState->fDelayInts = false;
5048 //pState->iStatIntLost += pState->iStatIntLostOne;
5049 //pState->iStatIntLostOne = 0;
5050 rc = s_e1kRegMap[index].pfnRead(pState, uOffset & 0xFFFFFFFC, index, &u32);
5051 u32 &= mask;
5052 //e1kCsLeave(pState);
5053 E1kLog2(("%s At %08X read %s from %s (%s)\n",
5054 szInst, uOffset, e1kU32toHex(u32, mask, buf), s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5055 /* Shift back the result. */
5056 u32 >>= shift;
5057 }
5058 else
5059 {
5060 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
5061 szInst, uOffset, e1kU32toHex(u32, mask, buf), s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5062 }
5063 }
5064 else
5065 {
5066 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
5067 szInst, uOffset, e1kU32toHex(u32, mask, buf)));
5068 }
5069
5070 memcpy(pv, &u32, cb);
5071 return rc;
5072}
5073
5074/**
5075 * Handle register write operation.
5076 *
5077 * Looks up and calls appropriate handler.
5078 *
5079 * @returns VBox status code.
5080 *
5081 * @param pState The device state structure.
5082 * @param uOffset Register offset in memory-mapped frame.
5083 * @param pv Where to fetch the value.
5084 * @param cb Number of bytes to write.
5085 * @thread EMT
5086 */
5087static int e1kRegWrite(E1KSTATE *pState, uint32_t uOffset, void const *pv, unsigned cb)
5088{
5089 int rc = VINF_SUCCESS;
5090 int index = e1kRegLookup(pState, uOffset);
5091 uint32_t u32;
5092
5093 /*
5094 * From the spec:
5095 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5096 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5097 */
5098
5099 if (cb != 4)
5100 {
5101 E1kLog(("%s e1kRegWrite: Spec violation: unsupported op size: offset=%#10x cb=%#10x, ignored.\n",
5102 INSTANCE(pState), uOffset, cb));
5103 return VINF_SUCCESS;
5104 }
5105 if (uOffset & 3)
5106 {
5107 E1kLog(("%s e1kRegWrite: Spec violation: misaligned offset: %#10x cb=%#10x, ignored.\n",
5108 INSTANCE(pState), uOffset, cb));
5109 return VINF_SUCCESS;
5110 }
5111 u32 = *(uint32_t*)pv;
5112 if (index != -1)
5113 {
5114 if (s_e1kRegMap[index].writable)
5115 {
5116 /*
5117 * Write it. Pass the mask so the handler knows what has to be written.
5118 * Mask out irrelevant bits.
5119 */
5120 E1kLog2(("%s At %08X write %08X to %s (%s)\n",
5121 INSTANCE(pState), uOffset, u32, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5122 //rc = e1kCsEnter(pState, VERR_SEM_BUSY, RT_SRC_POS);
5123 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5124 return rc;
5125 //pState->fDelayInts = false;
5126 //pState->iStatIntLost += pState->iStatIntLostOne;
5127 //pState->iStatIntLostOne = 0;
5128 rc = s_e1kRegMap[index].pfnWrite(pState, uOffset, index, u32);
5129 //e1kCsLeave(pState);
5130 }
5131 else
5132 {
5133 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
5134 INSTANCE(pState), uOffset, u32, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5135 }
5136 }
5137 else
5138 {
5139 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
5140 INSTANCE(pState), uOffset, u32));
5141 }
5142 return rc;
5143}
5144
5145/**
5146 * I/O handler for memory-mapped read operations.
5147 *
5148 * @returns VBox status code.
5149 *
5150 * @param pDevIns The device instance.
5151 * @param pvUser User argument.
5152 * @param GCPhysAddr Physical address (in GC) where the read starts.
5153 * @param pv Where to store the result.
5154 * @param cb Number of bytes read.
5155 * @thread EMT
5156 */
5157PDMBOTHCBDECL(int) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser,
5158 RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
5159{
5160 NOREF(pvUser);
5161 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5162 uint32_t uOffset = GCPhysAddr - pState->addrMMReg;
5163 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatMMIORead), a);
5164
5165 Assert(uOffset < E1K_MM_SIZE);
5166
5167 int rc = e1kRegRead(pState, uOffset, pv, cb);
5168 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatMMIORead), a);
5169 return rc;
5170}
5171
5172/**
5173 * Memory mapped I/O Handler for write operations.
5174 *
5175 * @returns VBox status code.
5176 *
5177 * @param pDevIns The device instance.
5178 * @param pvUser User argument.
5179 * @param GCPhysAddr Physical address (in GC) where the write starts.
5180 * @param pv Where to fetch the value.
5181 * @param cb Number of bytes to write.
5182 * @thread EMT
5183 */
5184PDMBOTHCBDECL(int) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser,
5185 RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
5186{
5187 NOREF(pvUser);
5188 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5189 uint32_t uOffset = GCPhysAddr - pState->addrMMReg;
5190 int rc;
5191 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatMMIOWrite), a);
5192
5193 Assert(uOffset < E1K_MM_SIZE);
5194 if (cb != 4)
5195 {
5196 E1kLog(("%s e1kMMIOWrite: invalid op size: offset=%#10x cb=%#10x\n", INSTANCE(pState), uOffset, cb));
5197 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "e1kMMIOWrite: invalid op size: offset=%#10x cb=%#10x\n", uOffset, cb);
5198 }
5199 else
5200 rc = e1kRegWrite(pState, uOffset, pv, cb);
5201
5202 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatMMIOWrite), a);
5203 return rc;
5204}
5205
5206/**
5207 * Port I/O Handler for IN operations.
5208 *
5209 * @returns VBox status code.
5210 *
5211 * @param pDevIns The device instance.
5212 * @param pvUser Pointer to the device state structure.
5213 * @param port Port number used for the IN operation.
5214 * @param pu32 Where to store the result.
5215 * @param cb Number of bytes read.
5216 * @thread EMT
5217 */
5218PDMBOTHCBDECL(int) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser,
5219 RTIOPORT port, uint32_t *pu32, unsigned cb)
5220{
5221 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5222 int rc = VINF_SUCCESS;
5223 const char *szInst = INSTANCE(pState);
5224 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatIORead), a);
5225
5226 port -= pState->addrIOPort;
5227 if (cb != 4)
5228 {
5229 E1kLog(("%s e1kIOPortIn: invalid op size: port=%RTiop cb=%08x", szInst, port, cb));
5230 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: port=%RTiop cb=%08x\n", szInst, port, cb);
5231 }
5232 else
5233 switch (port)
5234 {
5235 case 0x00: /* IOADDR */
5236 *pu32 = pState->uSelectedReg;
5237 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", szInst, pState->uSelectedReg, *pu32));
5238 break;
5239 case 0x04: /* IODATA */
5240 rc = e1kRegRead(pState, pState->uSelectedReg, pu32, cb);
5241 /** @todo wrong return code triggers assertions in the debug build; fix please */
5242 if (rc == VINF_IOM_R3_MMIO_READ)
5243 rc = VINF_IOM_R3_IOPORT_READ;
5244
5245 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", szInst, pState->uSelectedReg, *pu32));
5246 break;
5247 default:
5248 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", szInst, port));
5249 //*pRC = VERR_IOM_IOPORT_UNUSED;
5250 }
5251
5252 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatIORead), a);
5253 return rc;
5254}
5255
5256
5257/**
5258 * Port I/O Handler for OUT operations.
5259 *
5260 * @returns VBox status code.
5261 *
5262 * @param pDevIns The device instance.
5263 * @param pvUser User argument.
5264 * @param port Port number used for the OUT operation.
5265 * @param u32 The value to output.
5266 * @param cb The value size in bytes.
5267 * @thread EMT
5268 */
5269PDMBOTHCBDECL(int) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser,
5270 RTIOPORT port, uint32_t u32, unsigned cb)
5271{
5272 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5273 int rc = VINF_SUCCESS;
5274 const char *szInst = INSTANCE(pState);
5275 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatIOWrite), a);
5276
5277 E1kLog2(("%s e1kIOPortOut: port=%RTiop value=%08x\n", szInst, port, u32));
5278 if (cb != 4)
5279 {
5280 E1kLog(("%s e1kIOPortOut: invalid op size: port=%RTiop cb=%08x\n", szInst, port, cb));
5281 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortOut: invalid op size: port=%RTiop cb=%08x\n", szInst, port, cb);
5282 }
5283 else
5284 {
5285 port -= pState->addrIOPort;
5286 switch (port)
5287 {
5288 case 0x00: /* IOADDR */
5289 pState->uSelectedReg = u32;
5290 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", szInst, pState->uSelectedReg));
5291 break;
5292 case 0x04: /* IODATA */
5293 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", szInst, pState->uSelectedReg, u32));
5294 rc = e1kRegWrite(pState, pState->uSelectedReg, &u32, cb);
5295 /** @todo wrong return code triggers assertions in the debug build; fix please */
5296 if (rc == VINF_IOM_R3_MMIO_WRITE)
5297 rc = VINF_IOM_R3_IOPORT_WRITE;
5298 break;
5299 default:
5300 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", szInst, port));
5301 /** @todo Do we need to return an error here?
5302 * bird: VINF_SUCCESS is fine for unhandled cases of an OUT handler. (If you're curious
5303 * about the guest code and a bit adventuresome, try rc = PDMDeviceDBGFStop(...);) */
5304 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "e1kIOPortOut: invalid port %#010x\n", port);
5305 }
5306 }
5307
5308 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatIOWrite), a);
5309 return rc;
5310}
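/*
 * To illustrate the register window implemented by the two port handlers
 * above (guest-side pseudo-code, not part of the device): every MMIO
 * register is also reachable through two 32-bit I/O ports at BAR offsets 0
 * (IOADDR) and 4 (IODATA). To read a register the guest first writes the
 * register's MMIO offset to IOADDR and then reads IODATA:
 *
 *     out32(ioBase + 0x00, regOffset);   // select register -> e1kIOPortOut
 *     value = in32(ioBase + 0x04);       // read it         -> e1kIOPortIn
 *
 * Here out32()/in32() stand in for whatever port I/O primitives the guest
 * actually uses.
 */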
5311
5312#ifdef IN_RING3
5313/**
5314 * Dump complete device state to log.
5315 *
5316 * @param pState Pointer to device state.
5317 */
5318static void e1kDumpState(E1KSTATE *pState)
5319{
5320 for (int i = 0; i<E1K_NUM_OF_32BIT_REGS; ++i)
5321 {
5322 E1kLog2(("%s %8.8s = %08x\n", INSTANCE(pState),
5323 s_e1kRegMap[i].abbrev, pState->auRegs[i]));
5324 }
5325#ifdef E1K_INT_STATS
5326 LogRel(("%s Interrupt attempts: %d\n", INSTANCE(pState), pState->uStatIntTry));
5327 LogRel(("%s Interrupts raised : %d\n", INSTANCE(pState), pState->uStatInt));
5328 LogRel(("%s Interrupts lowered: %d\n", INSTANCE(pState), pState->uStatIntLower));
5329 LogRel(("%s Interrupts delayed: %d\n", INSTANCE(pState), pState->uStatIntDly));
5330 LogRel(("%s Disabled delayed: %d\n", INSTANCE(pState), pState->uStatDisDly));
5331 LogRel(("%s Interrupts skipped: %d\n", INSTANCE(pState), pState->uStatIntSkip));
5332 LogRel(("%s Masked interrupts : %d\n", INSTANCE(pState), pState->uStatIntMasked));
5333 LogRel(("%s Early interrupts : %d\n", INSTANCE(pState), pState->uStatIntEarly));
5334 LogRel(("%s Late interrupts : %d\n", INSTANCE(pState), pState->uStatIntLate));
5335 LogRel(("%s Lost interrupts : %d\n", INSTANCE(pState), pState->iStatIntLost));
5336 LogRel(("%s Interrupts by RX : %d\n", INSTANCE(pState), pState->uStatIntRx));
5337 LogRel(("%s Interrupts by TX : %d\n", INSTANCE(pState), pState->uStatIntTx));
5338 LogRel(("%s Interrupts by ICS : %d\n", INSTANCE(pState), pState->uStatIntICS));
5339 LogRel(("%s Interrupts by RDTR: %d\n", INSTANCE(pState), pState->uStatIntRDTR));
5340 LogRel(("%s Interrupts by RDMT: %d\n", INSTANCE(pState), pState->uStatIntRXDMT0));
5341 LogRel(("%s Interrupts by TXQE: %d\n", INSTANCE(pState), pState->uStatIntTXQE));
5342 LogRel(("%s TX int delay asked: %d\n", INSTANCE(pState), pState->uStatTxIDE));
5343 LogRel(("%s TX no report asked: %d\n", INSTANCE(pState), pState->uStatTxNoRS));
5344 LogRel(("%s TX abs timer expd : %d\n", INSTANCE(pState), pState->uStatTAD));
5345 LogRel(("%s TX int timer expd : %d\n", INSTANCE(pState), pState->uStatTID));
5346 LogRel(("%s RX abs timer expd : %d\n", INSTANCE(pState), pState->uStatRAD));
5347 LogRel(("%s RX int timer expd : %d\n", INSTANCE(pState), pState->uStatRID));
5348 LogRel(("%s TX CTX descriptors: %d\n", INSTANCE(pState), pState->uStatDescCtx));
5349 LogRel(("%s TX DAT descriptors: %d\n", INSTANCE(pState), pState->uStatDescDat));
5350 LogRel(("%s TX LEG descriptors: %d\n", INSTANCE(pState), pState->uStatDescLeg));
5351 LogRel(("%s Received frames : %d\n", INSTANCE(pState), pState->uStatRxFrm));
5352 LogRel(("%s Transmitted frames: %d\n", INSTANCE(pState), pState->uStatTxFrm));
5353#endif /* E1K_INT_STATS */
5354}
5355
5356/**
5357 * Map PCI I/O region.
5358 *
5359 * @return VBox status code.
5360 * @param pPciDev Pointer to PCI device. Use pPciDev->pDevIns to get the device instance.
5361 * @param iRegion The region number.
5362 * @param GCPhysAddress Physical address of the region. If iType is PCI_ADDRESS_SPACE_IO, this is an
5363 * I/O port, else it's a physical address.
5364 * This address is *NOT* relative to pci_mem_base like earlier!
5365 * @param cb Region size.
5366 * @param enmType One of the PCI_ADDRESS_SPACE_* values.
5367 * @thread EMT
5368 */
5369static DECLCALLBACK(int) e1kMap(PPCIDEVICE pPciDev, int iRegion,
5370 RTGCPHYS GCPhysAddress, uint32_t cb, PCIADDRESSSPACE enmType)
5371{
5372 int rc;
5373 E1KSTATE *pState = PDMINS_2_DATA(pPciDev->pDevIns, E1KSTATE*);
5374
5375 switch (enmType)
5376 {
5377 case PCI_ADDRESS_SPACE_IO:
5378 pState->addrIOPort = (RTIOPORT)GCPhysAddress;
5379 rc = PDMDevHlpIOPortRegister(pPciDev->pDevIns, pState->addrIOPort, cb, 0,
5380 e1kIOPortOut, e1kIOPortIn, NULL, NULL, "E1000");
5381 if (RT_FAILURE(rc))
5382 break;
5383 if (pState->fR0Enabled)
5384 {
5385 rc = PDMDevHlpIOPortRegisterR0(pPciDev->pDevIns, pState->addrIOPort, cb, 0,
5386 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
5387 if (RT_FAILURE(rc))
5388 break;
5389 }
5390 if (pState->fGCEnabled)
5391 {
5392 rc = PDMDevHlpIOPortRegisterRC(pPciDev->pDevIns, pState->addrIOPort, cb, 0,
5393 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
5394 }
5395 break;
5396 case PCI_ADDRESS_SPACE_MEM:
5397 pState->addrMMReg = GCPhysAddress;
5398 rc = PDMDevHlpMMIORegister(pPciDev->pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
5399 IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
5400 e1kMMIOWrite, e1kMMIORead, "E1000");
5401 if (pState->fR0Enabled)
5402 {
5403 rc = PDMDevHlpMMIORegisterR0(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTR0PTR /*pvUser*/,
5404 "e1kMMIOWrite", "e1kMMIORead");
5405 if (RT_FAILURE(rc))
5406 break;
5407 }
5408 if (pState->fGCEnabled)
5409 {
5410 rc = PDMDevHlpMMIORegisterRC(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
5411 "e1kMMIOWrite", "e1kMMIORead");
5412 }
5413 break;
5414 default:
5415 /* We should never get here */
5416 AssertMsgFailed(("Invalid PCI address space param in map callback"));
5417 rc = VERR_INTERNAL_ERROR;
5418 break;
5419 }
5420 return rc;
5421}
5422
5423/**
5424 * Check if the device can receive data now.
5425 * This must be called before the pfnReceive() method is called.
5426 *
5427 * @returns VBox status code: VINF_SUCCESS if the device can receive data, VERR_NET_NO_BUFFER_SPACE otherwise.
5428 * @param pState The device state structure.
5429 * @thread EMT
5430 */
5431static int e1kCanReceive(E1KSTATE *pState)
5432{
5433 size_t cb;
5434
5435 if (RT_UNLIKELY(e1kCsRxEnter(pState, VERR_SEM_BUSY) != VINF_SUCCESS))
5436 return VERR_NET_NO_BUFFER_SPACE;
5437
5438 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
5439 {
5440 E1KRXDESC desc;
5441 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
5442 &desc, sizeof(desc));
5443 if (desc.status.fDD)
5444 cb = 0;
5445 else
5446 cb = pState->u16RxBSize;
5447 }
5448 else if (RDH < RDT)
5449 cb = (RDT - RDH) * pState->u16RxBSize;
5450 else if (RDH > RDT)
5451 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pState->u16RxBSize;
5452 else
5453 {
5454 cb = 0;
5455 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
5456 }
5457 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
5458 INSTANCE(pState), RDH, RDT, RDLEN, pState->u16RxBSize, cb));
5459
5460 e1kCsRxLeave(pState);
5461 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
5462}
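/*
 * Numeric illustration of the free-space estimate above (made-up values):
 * with a 16-descriptor ring (RDLEN / sizeof(E1KRXDESC) == 16) and
 * u16RxBSize = 2048, RDH = 14 and RDT = 3 give (16 - 14 + 3) * 2048 = 10240
 * bytes of guest buffer space, while RDH = 3 and RDT = 14 give
 * (14 - 3) * 2048 = 22528 bytes. RDH == RDT means the guest has provided no
 * free descriptors, so the caller gets VERR_NET_NO_BUFFER_SPACE.
 */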
5463
5464/**
5465 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
5466 */
5467static DECLCALLBACK(int) e1kNetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
5468{
5469 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5470 int rc = e1kCanReceive(pState);
5471
5472 if (RT_SUCCESS(rc))
5473 return VINF_SUCCESS;
5474 if (RT_UNLIKELY(cMillies == 0))
5475 return VERR_NET_NO_BUFFER_SPACE;
5476
5477 rc = VERR_INTERRUPTED;
5478 ASMAtomicXchgBool(&pState->fMaybeOutOfSpace, true);
5479 STAM_PROFILE_START(&pState->StatRxOverflow, a);
5480 VMSTATE enmVMState;
5481 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pState->CTX_SUFF(pDevIns))) == VMSTATE_RUNNING
5482 || enmVMState == VMSTATE_RUNNING_LS))
5483 {
5484 int rc2 = e1kCanReceive(pState);
5485 if (RT_SUCCESS(rc2))
5486 {
5487 rc = VINF_SUCCESS;
5488 break;
5489 }
5490 E1kLogRel(("E1000 e1kNetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n",
5491 cMillies));
5492 E1kLog(("%s e1kNetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n",
5493 INSTANCE(pState), cMillies));
5494 RTSemEventWait(pState->hEventMoreRxDescAvail, cMillies);
5495 }
5496 STAM_PROFILE_STOP(&pState->StatRxOverflow, a);
5497 ASMAtomicXchgBool(&pState->fMaybeOutOfSpace, false);
5498
5499 return rc;
5500}
5501
5502
5503/**
5504 * Matches the packet addresses against Receive Address table. Looks for
5505 * exact matches only.
5506 *
5507 * @returns true if address matches.
5508 * @param pState Pointer to the state structure.
5509 * @param pvBuf The ethernet packet.
5511 * @thread EMT
5512 */
5513static bool e1kPerfectMatch(E1KSTATE *pState, const void *pvBuf)
5514{
5515 for (unsigned i = 0; i < RT_ELEMENTS(pState->aRecAddr.array); i++)
5516 {
5517 E1KRAELEM* ra = pState->aRecAddr.array + i;
5518
5519 /* Valid address? */
5520 if (ra->ctl & RA_CTL_AV)
5521 {
5522 Assert((ra->ctl & RA_CTL_AS) < 2);
5523 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
5524 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
5525 // INSTANCE(pState), pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
5526 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
5527 /*
5528 * Address Select:
5529 * 00b = Destination address
5530 * 01b = Source address
5531 * 10b = Reserved
5532 * 11b = Reserved
5533 * Since ethernet header is (DA, SA, len) we can use address
5534 * select as index.
5535 */
5536 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
5537 ra->addr, sizeof(ra->addr)) == 0)
5538 return true;
5539 }
5540 }
5541
5542 return false;
5543}
5544
5545/**
5546 * Matches the packet addresses against Multicast Table Array.
5547 *
5548 * @remarks This is an imperfect match since it matches a subset of addresses
5549 * rather than one exact address.
5550 *
5551 * @returns true if address matches.
5552 * @param pState Pointer to the state structure.
5553 * @param pvBuf The ethernet packet.
5555 * @thread EMT
5556 */
5557static bool e1kImperfectMatch(E1KSTATE *pState, const void *pvBuf)
5558{
5559 /* Get bits 32..47 of destination address */
5560 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
5561
5562 unsigned offset = GET_BITS(RCTL, MO);
5563 /*
5564 * offset means:
5565 * 00b = bits 36..47
5566 * 01b = bits 35..46
5567 * 10b = bits 34..45
5568 * 11b = bits 32..43
5569 */
5570 if (offset < 3)
5571 u16Bit = u16Bit >> (4 - offset);
5572 return ASMBitTest(pState->auMTA, u16Bit & 0xFFF);
5573}
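/*
 * Illustration of the hashing above: the code takes the 16-bit word holding
 * bits 32..47 of the destination MAC address and, depending on RCTL.MO,
 * shifts it so that a 12-bit slice remains. With MO = 00b the word is shifted
 * right by 4, selecting bits 36..47; with MO = 11b no shift is applied and
 * bits 32..43 are used. The resulting 12-bit value is simply a bit index into
 * the 4096-bit Multicast Table Array, so multiple multicast addresses can
 * share the same MTA bit -- hence the "imperfect" match.
 */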
5574
5575/**
5576 * Determines if the packet is to be delivered to the upper layer. The following
5577 * filters are supported:
5578 * - Exact Unicast/Multicast
5579 * - Promiscuous Unicast/Multicast
5580 * - Multicast
5581 * - VLAN
5582 *
5583 * @returns true if packet is intended for this node.
5584 * @param pState Pointer to the state structure.
5585 * @param pvBuf The ethernet packet.
5586 * @param cb Number of bytes available in the packet.
5587 * @param pStatus Bit field to store status bits.
5588 * @thread EMT
5589 */
5590static bool e1kAddressFilter(E1KSTATE *pState, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
5591{
5592 Assert(cb > 14);
5593 /* Assume that we fail to pass exact filter. */
5594 pStatus->fPIF = false;
5595 pStatus->fVP = false;
5596 /* Discard oversized packets */
5597 if (cb > E1K_MAX_RX_PKT_SIZE)
5598 {
5599 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
5600 INSTANCE(pState), cb, E1K_MAX_RX_PKT_SIZE));
5601 E1K_INC_CNT32(ROC);
5602 return false;
5603 }
5604 else if (!(RCTL & RCTL_LPE) && cb > 1522)
5605 {
5606 /* When long packet reception is disabled packets over 1522 are discarded */
5607 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
5608 INSTANCE(pState), cb));
5609 E1K_INC_CNT32(ROC);
5610 return false;
5611 }
5612
5613 uint16_t *u16Ptr = (uint16_t*)pvBuf;
5614 /* Compare TPID with VLAN Ether Type */
5615 if (RT_BE2H_U16(u16Ptr[6]) == VET)
5616 {
5617 pStatus->fVP = true;
5618 /* Is VLAN filtering enabled? */
5619 if (RCTL & RCTL_VFE)
5620 {
5621 /* It is 802.1q packet indeed, let's filter by VID */
5622 if (RCTL & RCTL_CFIEN)
5623 {
5624 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", INSTANCE(pState),
5625 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
5626 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
5627 !!(RCTL & RCTL_CFI)));
5628 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
5629 {
5630 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
5631 INSTANCE(pState), E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
5632 return false;
5633 }
5634 }
5635 else
5636 E1kLog3(("%s VLAN filter: VLAN=%d\n", INSTANCE(pState),
5637 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
5638 if (!ASMBitTest(pState->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
5639 {
5640 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
5641 INSTANCE(pState), E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
5642 return false;
5643 }
5644 }
5645 }
5646 /* Broadcast filtering */
5647 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
5648 return true;
5649 E1kLog2(("%s Packet filter: not a broadcast\n", INSTANCE(pState)));
5650 if (e1kIsMulticast(pvBuf))
5651 {
5652 /* Is multicast promiscuous enabled? */
5653 if (RCTL & RCTL_MPE)
5654 return true;
5655 E1kLog2(("%s Packet filter: no promiscuous multicast\n", INSTANCE(pState)));
5656 /* Try perfect matches first */
5657 if (e1kPerfectMatch(pState, pvBuf))
5658 {
5659 pStatus->fPIF = true;
5660 return true;
5661 }
5662 E1kLog2(("%s Packet filter: no perfect match\n", INSTANCE(pState)));
5663 if (e1kImperfectMatch(pState, pvBuf))
5664 return true;
5665 E1kLog2(("%s Packet filter: no imperfect match\n", INSTANCE(pState)));
5666 }
5667 else {
5668 /* Is unicast promiscuous enabled? */
5669 if (RCTL & RCTL_UPE)
5670 return true;
5671 E1kLog2(("%s Packet filter: no promiscuous unicast\n", INSTANCE(pState)));
5672 if (e1kPerfectMatch(pState, pvBuf))
5673 {
5674 pStatus->fPIF = true;
5675 return true;
5676 }
5677 E1kLog2(("%s Packet filter: no perfect match\n", INSTANCE(pState)));
5678 }
5679 E1kLog2(("%s Packet filter: packet discarded\n", INSTANCE(pState)));
5680 return false;
5681}
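/*
 * Frame layout assumed by the VLAN check above (for reference): an 802.1Q
 * tagged Ethernet frame consists of the 6-byte destination address, the
 * 6-byte source address, the 2-byte TPID (typically 0x8100, compared against
 * VET after byte swapping) and the 2-byte TCI carrying the CFI bit and the
 * 12-bit VLAN ID. That is why u16Ptr[6] is matched against VET and u16Ptr[7]
 * is fed to the E1K_SPEC_CFI()/E1K_SPEC_VLAN() extractors.
 */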
5682
5683/**
5684 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
5685 */
5686static DECLCALLBACK(int) e1kNetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
5687{
5688 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5689 int rc = VINF_SUCCESS;
5690
5691 /*
5692 * Drop packets if the VM is not running yet/anymore.
5693 */
5694 VMSTATE enmVMState = PDMDevHlpVMState(STATE_TO_DEVINS(pState));
5695 if ( enmVMState != VMSTATE_RUNNING
5696 && enmVMState != VMSTATE_RUNNING_LS)
5697 {
5698 E1kLog(("%s Dropping incoming packet as VM is not running.\n", INSTANCE(pState)));
5699 return VINF_SUCCESS;
5700 }
5701
5702 /* Discard incoming packets in locked state */
5703 if (!(RCTL & RCTL_EN) || pState->fLocked || !(STATUS & STATUS_LU))
5704 {
5705 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", INSTANCE(pState)));
5706 return VINF_SUCCESS;
5707 }
5708
5709 STAM_PROFILE_ADV_START(&pState->StatReceive, a);
5710
5711 //if (!e1kCsEnter(pState, RT_SRC_POS))
5712 // return VERR_PERMISSION_DENIED;
5713
5714 e1kPacketDump(pState, (const uint8_t*)pvBuf, cb, "<-- Incoming");
5715
5716 /* Update stats */
5717 if (RT_LIKELY(e1kCsEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
5718 {
5719 E1K_INC_CNT32(TPR);
5720 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
5721 e1kCsLeave(pState);
5722 }
5723 STAM_PROFILE_ADV_START(&pState->StatReceiveFilter, a);
5724 E1KRXDST status;
5725 RT_ZERO(status);
5726 bool fPassed = e1kAddressFilter(pState, pvBuf, cb, &status);
5727 STAM_PROFILE_ADV_STOP(&pState->StatReceiveFilter, a);
5728 if (fPassed)
5729 {
5730 rc = e1kHandleRxPacket(pState, pvBuf, cb, status);
5731 }
5732 //e1kCsLeave(pState);
5733 STAM_PROFILE_ADV_STOP(&pState->StatReceive, a);
5734
5735 return rc;
5736}
5737
5738/**
5739 * Gets the pointer to the status LED of a unit.
5740 *
5741 * @returns VBox status code.
5742 * @param pInterface Pointer to the interface structure.
5743 * @param iLUN The unit which status LED we desire.
5744 * @param ppLed Where to store the LED pointer.
5745 * @thread EMT
5746 */
5747static DECLCALLBACK(int) e1kQueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
5748{
5749 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, ILeds);
5750 int rc = VERR_PDM_LUN_NOT_FOUND;
5751
5752 if (iLUN == 0)
5753 {
5754 *ppLed = &pState->led;
5755 rc = VINF_SUCCESS;
5756 }
5757 return rc;
5758}
5759
5760/**
5761 * Gets the current Media Access Control (MAC) address.
5762 *
5763 * @returns VBox status code.
5764 * @param pInterface Pointer to the interface structure containing the called function pointer.
5765 * @param pMac Where to store the MAC address.
5766 * @thread EMT
5767 */
5768static DECLCALLBACK(int) e1kGetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
5769{
5770 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
5771 pState->eeprom.getMac(pMac);
5772 return VINF_SUCCESS;
5773}
5774
5775
5776/**
5777 * Gets the current link state.
5778 *
5779 * @returns The current link state.
5780 * @param pInterface Pointer to the interface structure containing the called function pointer.
5781 * @thread EMT
5782 */
5783static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kGetLinkState(PPDMINETWORKCONFIG pInterface)
5784{
5785 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
5786 if (STATUS & STATUS_LU)
5787 return PDMNETWORKLINKSTATE_UP;
5788 return PDMNETWORKLINKSTATE_DOWN;
5789}
5790
5791
5792/**
5793 * Sets the new link state.
5794 *
5795 * @returns VBox status code.
5796 * @param pInterface Pointer to the interface structure containing the called function pointer.
5797 * @param enmState The new link state
5798 * @thread EMT
5799 */
5800static DECLCALLBACK(int) e1kSetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
5801{
5802 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
5803 bool fOldUp = !!(STATUS & STATUS_LU);
5804 bool fNewUp = enmState == PDMNETWORKLINKSTATE_UP;
5805
5806 if ( fNewUp != fOldUp
5807 || (!fNewUp && pState->fCableConnected)) /* old state was connected but STATUS not
5808 * yet written by guest */
5809 {
5810 if (fNewUp)
5811 {
5812 E1kLog(("%s Link will be up in approximately 5 secs\n", INSTANCE(pState)));
5813 pState->fCableConnected = true;
5814 STATUS &= ~STATUS_LU;
5815 Phy::setLinkStatus(&pState->phy, false);
5816 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
5817 /* Restore the link back in 5 seconds. */
5818 e1kArmTimer(pState, pState->pLUTimerR3, 5000000);
5819 }
5820 else
5821 {
5822 E1kLog(("%s Link is down\n", INSTANCE(pState)));
5823 pState->fCableConnected = false;
5824 STATUS &= ~STATUS_LU;
5825 Phy::setLinkStatus(&pState->phy, false);
5826 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
5827 }
5828 if (pState->pDrvR3)
5829 pState->pDrvR3->pfnNotifyLinkChanged(pState->pDrvR3, enmState);
5830 }
5831 return VINF_SUCCESS;
5832}
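/*
 * Note on the pattern above: a "link up" request, like the attach and
 * load-done paths, first forces STATUS.LU and the PHY link status down,
 * raises ICR.LSC and arms pLUTimer; when that timer fires the link is brought
 * up again roughly five seconds later (the 5000000 argument appears to be the
 * timer period in microseconds). This gives the guest driver a chance to
 * notice the link transition after a configuration change.
 */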
5833
5834/**
5835 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
5836 */
5837static DECLCALLBACK(void *) e1kQueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
5838{
5839 E1KSTATE *pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, IBase);
5840 Assert(&pThis->IBase == pInterface);
5841
5842 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
5843 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThis->INetworkDown);
5844 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThis->INetworkConfig);
5845 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
5846 return NULL;
5847}
5848
5849/**
5850 * Saves the configuration.
5851 *
5852 * @param pState The E1K state.
5853 * @param pSSM The handle to the saved state.
5854 */
5855static void e1kSaveConfig(E1KSTATE *pState, PSSMHANDLE pSSM)
5856{
5857 SSMR3PutMem(pSSM, &pState->macConfigured, sizeof(pState->macConfigured));
5858 SSMR3PutU32(pSSM, pState->eChip);
5859}
5860
5861/**
5862 * Live save - save basic configuration.
5863 *
5864 * @returns VBox status code.
5865 * @param pDevIns The device instance.
5866 * @param pSSM The handle to the saved state.
5867 * @param uPass The data pass.
5868 */
5869static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
5870{
5871 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
5872 e1kSaveConfig(pState, pSSM);
5873 return VINF_SSM_DONT_CALL_AGAIN;
5874}
5875
5876/**
5877 * Prepares for state saving.
5878 *
5879 * @returns VBox status code.
5880 * @param pDevIns The device instance.
5881 * @param pSSM The handle to the saved state.
5882 */
5883static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
5884{
5885 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
5886
5887 int rc = e1kCsEnter(pState, VERR_SEM_BUSY);
5888 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5889 return rc;
5890 e1kCsLeave(pState);
5891 return VINF_SUCCESS;
5892#if 0
5893 /* 1) Prevent all threads from modifying the state and memory */
5894 //pState->fLocked = true;
5895 /* 2) Cancel all timers */
5896#ifdef E1K_USE_TX_TIMERS
5897 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
5898#ifndef E1K_NO_TAD
5899 e1kCancelTimer(pState, pState->CTX_SUFF(pTADTimer));
5900#endif /* E1K_NO_TAD */
5901#endif /* E1K_USE_TX_TIMERS */
5902#ifdef E1K_USE_RX_TIMERS
5903 e1kCancelTimer(pState, pState->CTX_SUFF(pRIDTimer));
5904 e1kCancelTimer(pState, pState->CTX_SUFF(pRADTimer));
5905#endif /* E1K_USE_RX_TIMERS */
5906 e1kCancelTimer(pState, pState->CTX_SUFF(pIntTimer));
5907 /* 3) Did I forget anything? */
5908 E1kLog(("%s Locked\n", INSTANCE(pState)));
5909 return VINF_SUCCESS;
5910#endif
5911}
5912
5913
5914/**
5915 * Saves the state of device.
5916 *
5917 * @returns VBox status code.
5918 * @param pDevIns The device instance.
5919 * @param pSSM The handle to the saved state.
5920 */
5921static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
5922{
5923 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
5924
5925 e1kSaveConfig(pState, pSSM);
5926 pState->eeprom.save(pSSM);
5927 e1kDumpState(pState);
5928 SSMR3PutMem(pSSM, pState->auRegs, sizeof(pState->auRegs));
5929 SSMR3PutBool(pSSM, pState->fIntRaised);
5930 Phy::saveState(pSSM, &pState->phy);
5931 SSMR3PutU32(pSSM, pState->uSelectedReg);
5932 SSMR3PutMem(pSSM, pState->auMTA, sizeof(pState->auMTA));
5933 SSMR3PutMem(pSSM, &pState->aRecAddr, sizeof(pState->aRecAddr));
5934 SSMR3PutMem(pSSM, pState->auVFTA, sizeof(pState->auVFTA));
5935 SSMR3PutU64(pSSM, pState->u64AckedAt);
5936 SSMR3PutU16(pSSM, pState->u16RxBSize);
5937 //SSMR3PutBool(pSSM, pState->fDelayInts);
5938 //SSMR3PutBool(pSSM, pState->fIntMaskUsed);
5939 SSMR3PutU16(pSSM, pState->u16TxPktLen);
5940/** @todo State wrt the TSE buffer is incomplete, so little point in
5941 * saving this actually. */
5942 SSMR3PutMem(pSSM, pState->aTxPacketFallback, pState->u16TxPktLen);
5943 SSMR3PutBool(pSSM, pState->fIPcsum);
5944 SSMR3PutBool(pSSM, pState->fTCPcsum);
5945 SSMR3PutMem(pSSM, &pState->contextTSE, sizeof(pState->contextTSE));
5946 SSMR3PutMem(pSSM, &pState->contextNormal, sizeof(pState->contextNormal));
5947 SSMR3PutBool(pSSM, pState->fVTag);
5948 SSMR3PutU16(pSSM, pState->u16VTagTCI);
5949#ifdef E1K_WITH_TXD_CACHE
5950 SSMR3PutU8(pSSM, pState->nTxDFetched);
5951 SSMR3PutMem(pSSM, pState->aTxDescriptors,
5952 pState->nTxDFetched * sizeof(pState->aTxDescriptors[0]));
5953#endif /* E1K_WITH_TXD_CACHE */
5954/**@todo GSO requires some more state here. */
5955 E1kLog(("%s State has been saved\n", INSTANCE(pState)));
5956 return VINF_SUCCESS;
5957}
5958
5959#if 0
5960/**
5961 * Cleanup after saving.
5962 *
5963 * @returns VBox status code.
5964 * @param pDevIns The device instance.
5965 * @param pSSM The handle to the saved state.
5966 */
5967static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
5968{
5969 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
5970
5971 /* If VM is being powered off unlocking will result in assertions in PGM */
5972 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
5973 pState->fLocked = false;
5974 else
5975 E1kLog(("%s VM is not running -- remain locked\n", INSTANCE(pState)));
5976 E1kLog(("%s Unlocked\n", INSTANCE(pState)));
5977 return VINF_SUCCESS;
5978}
5979#endif
5980
5981/**
5982 * Prepares for state loading.
5983 *
5984 * @returns VBox status code.
5985 * @param pDevIns The device instance.
5986 * @param pSSM The handle to the saved state.
5987 */
5988static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
5989{
5990 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
5991
5992 int rc = e1kCsEnter(pState, VERR_SEM_BUSY);
5993 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5994 return rc;
5995 e1kCsLeave(pState);
5996 return VINF_SUCCESS;
5997}
5998
5999/**
6000 * Restore previously saved state of device.
6001 *
6002 * @returns VBox status code.
6003 * @param pDevIns The device instance.
6004 * @param pSSM The handle to the saved state.
6005 * @param uVersion The data unit version number.
6006 * @param uPass The data pass.
6007 */
6008static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6009{
6010 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6011 int rc;
6012
6013 if ( uVersion != E1K_SAVEDSTATE_VERSION
6014 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
6015 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
6016 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
6017
6018 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
6019 || uPass != SSM_PASS_FINAL)
6020 {
6021 /* config checks */
6022 RTMAC macConfigured;
6023 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
6024 AssertRCReturn(rc, rc);
6025 if ( memcmp(&macConfigured, &pState->macConfigured, sizeof(macConfigured))
6026 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
6027 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", INSTANCE(pState), &pState->macConfigured, &macConfigured));
6028
6029 E1KCHIP eChip;
6030 rc = SSMR3GetU32(pSSM, &eChip);
6031 AssertRCReturn(rc, rc);
6032 if (eChip != pState->eChip)
6033 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pState->eChip, eChip);
6034 }
6035
6036 if (uPass == SSM_PASS_FINAL)
6037 {
6038 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
6039 {
6040 rc = pState->eeprom.load(pSSM);
6041 AssertRCReturn(rc, rc);
6042 }
6043 /* the state */
6044 SSMR3GetMem(pSSM, &pState->auRegs, sizeof(pState->auRegs));
6045 SSMR3GetBool(pSSM, &pState->fIntRaised);
6046 /** @todo: PHY could be made a separate device with its own versioning */
6047 Phy::loadState(pSSM, &pState->phy);
6048 SSMR3GetU32(pSSM, &pState->uSelectedReg);
6049 SSMR3GetMem(pSSM, &pState->auMTA, sizeof(pState->auMTA));
6050 SSMR3GetMem(pSSM, &pState->aRecAddr, sizeof(pState->aRecAddr));
6051 SSMR3GetMem(pSSM, &pState->auVFTA, sizeof(pState->auVFTA));
6052 SSMR3GetU64(pSSM, &pState->u64AckedAt);
6053 SSMR3GetU16(pSSM, &pState->u16RxBSize);
6054 //SSMR3GetBool(pSSM, pState->fDelayInts);
6055 //SSMR3GetBool(pSSM, pState->fIntMaskUsed);
6056 SSMR3GetU16(pSSM, &pState->u16TxPktLen);
6057 SSMR3GetMem(pSSM, &pState->aTxPacketFallback[0], pState->u16TxPktLen);
6058 SSMR3GetBool(pSSM, &pState->fIPcsum);
6059 SSMR3GetBool(pSSM, &pState->fTCPcsum);
6060 SSMR3GetMem(pSSM, &pState->contextTSE, sizeof(pState->contextTSE));
6061 rc = SSMR3GetMem(pSSM, &pState->contextNormal, sizeof(pState->contextNormal));
6062 AssertRCReturn(rc, rc);
6063 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
6064 {
6065 SSMR3GetBool(pSSM, &pState->fVTag);
6066 rc = SSMR3GetU16(pSSM, &pState->u16VTagTCI);
6067 AssertRCReturn(rc, rc);
6068#ifdef E1K_WITH_TXD_CACHE
6069 rc = SSMR3GetU8(pSSM, &pState->nTxDFetched);
6070 AssertRCReturn(rc, rc);
6071 SSMR3GetMem(pSSM, pState->aTxDescriptors,
6072 pState->nTxDFetched * sizeof(pState->aTxDescriptors[0]));
6073#endif /* E1K_WITH_TXD_CACHE */
6074 }
6075 else
6076 {
6077 pState->fVTag = false;
6078 pState->u16VTagTCI = 0;
6079#ifdef E1K_WITH_TXD_CACHE
6080 pState->nTxDFetched = 0;
6081#endif /* E1K_WITH_TXD_CACHE */
6082 }
6083 /* derived state */
6084 e1kSetupGsoCtx(&pState->GsoCtx, &pState->contextTSE);
6085
6086 E1kLog(("%s State has been restored\n", INSTANCE(pState)));
6087 e1kDumpState(pState);
6088 }
6089 return VINF_SUCCESS;
6090}
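/*
 * For clarity: the SSMR3Get* sequence above must mirror the SSMR3Put*
 * sequence in e1kSaveExec() field for field; only the trailing fields that
 * are guarded by saved-state version checks (VLAN tag, TX descriptor cache)
 * may be absent in older streams, in which case they are given safe defaults.
 */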
6091
6092/**
6093 * Link status adjustments after loading.
6094 *
6095 * @returns VBox status code.
6096 * @param pDevIns The device instance.
6097 * @param pSSM The handle to the saved state.
6098 */
6099static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6100{
6101 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6102
6103 /* Update promiscuous mode */
6104 if (pState->pDrvR3)
6105 pState->pDrvR3->pfnSetPromiscuousMode(pState->pDrvR3,
6106 !!(RCTL & (RCTL_UPE | RCTL_MPE)));
6107
6108 /*
6109 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
6110 * passed to us. We go through all this stuff if the link was up and we
6111 * weren't teleported.
6112 */
6113 if ( (STATUS & STATUS_LU)
6114 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns))
6115 {
6116 E1kLog(("%s Link is down temporarily\n", INSTANCE(pState)));
6117 STATUS &= ~STATUS_LU;
6118 Phy::setLinkStatus(&pState->phy, false);
6119 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
6120 /* Restore the link back in five seconds. */
6121 e1kArmTimer(pState, pState->pLUTimerR3, 5000000);
6122 }
6123 return VINF_SUCCESS;
6124}
6125
6126
6127/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
6128
6129/**
6130 * Detach notification.
6131 *
6132 * One port on the network card has been disconnected from the network.
6133 *
6134 * @param pDevIns The device instance.
6135 * @param iLUN The logical unit which is being detached.
6136 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
6137 */
6138static DECLCALLBACK(void) e1kDetach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
6139{
6140 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6141 Log(("%s e1kDetach:\n", INSTANCE(pState)));
6142
6143 AssertLogRelReturnVoid(iLUN == 0);
6144
6145 PDMCritSectEnter(&pState->cs, VERR_SEM_BUSY);
6146
6147 /** @todo: r=pritesh still need to check if i missed
6148 * to clean something in this function
6149 */
6150
6151 /*
6152 * Zero some important members.
6153 */
6154 pState->pDrvBase = NULL;
6155 pState->pDrvR3 = NULL;
6156 pState->pDrvR0 = NIL_RTR0PTR;
6157 pState->pDrvRC = NIL_RTRCPTR;
6158
6159 PDMCritSectLeave(&pState->cs);
6160}
6161
6162/**
6163 * Attach the Network attachment.
6164 *
6165 * One port on the network card has been connected to a network.
6166 *
6167 * @returns VBox status code.
6168 * @param pDevIns The device instance.
6169 * @param iLUN The logical unit which is being attached.
6170 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
6171 *
6172 * @remarks This code path is not used during construction.
6173 */
6174static DECLCALLBACK(int) e1kAttach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
6175{
6176 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6177 LogFlow(("%s e1kAttach:\n", INSTANCE(pState)));
6178
6179 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
6180
6181 PDMCritSectEnter(&pState->cs, VERR_SEM_BUSY);
6182
6183 /*
6184 * Attach the driver.
6185 */
6186 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pState->IBase, &pState->pDrvBase, "Network Port");
6187 if (RT_SUCCESS(rc))
6188 {
6189 if (rc == VINF_NAT_DNS)
6190 {
6191#ifdef RT_OS_LINUX
6192 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
6193 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
6194#else
6195 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
6196 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
6197#endif
6198 }
6199 pState->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMINETWORKUP);
6200 AssertMsgStmt(pState->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
6201 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
6202 if (RT_SUCCESS(rc))
6203 {
6204 PPDMIBASER0 pBaseR0 = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASER0);
6205 pState->pDrvR0 = pBaseR0 ? pBaseR0->pfnQueryInterface(pBaseR0, PDMINETWORKUP_IID) : NIL_RTR0PTR;
6206
6207 PPDMIBASERC pBaseRC = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASERC);
6208 pState->pDrvRC = pBaseRC ? pBaseRC->pfnQueryInterface(pBaseRC, PDMINETWORKUP_IID) : NIL_RTRCPTR;
6209 }
6210 }
6211 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
6212 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
6213 {
6214 /* This should never happen because this function is not called
6215 * if there is no driver to attach! */
6216 Log(("%s No attached driver!\n", INSTANCE(pState)));
6217 }
6218
6219 /*
6220 * Temporarily set the link down if it was up so that the guest
6221 * will know that we have changed the configuration of the
6222 * network card.
6223 */
6224 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
6225 {
6226 STATUS &= ~STATUS_LU;
6227 Phy::setLinkStatus(&pState->phy, false);
6228 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
6229 /* Restore the link back in 5 seconds. */
6230 e1kArmTimer(pState, pState->pLUTimerR3, 5000000);
6231 }
6232
6233 PDMCritSectLeave(&pState->cs);
6234 return rc;
6235
6236}
6237
6238/**
6239 * @copydoc FNPDMDEVPOWEROFF
6240 */
6241static DECLCALLBACK(void) e1kPowerOff(PPDMDEVINS pDevIns)
6242{
6243 /* Poke thread waiting for buffer space. */
6244 e1kWakeupReceive(pDevIns);
6245}
6246
6247/**
6248 * @copydoc FNPDMDEVRESET
6249 */
6250static DECLCALLBACK(void) e1kReset(PPDMDEVINS pDevIns)
6251{
6252 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6253 e1kCancelTimer(pState, pState->CTX_SUFF(pIntTimer));
6254 e1kCancelTimer(pState, pState->CTX_SUFF(pLUTimer));
6255 e1kXmitFreeBuf(pState);
6256 pState->u16TxPktLen = 0;
6257 pState->fIPcsum = false;
6258 pState->fTCPcsum = false;
6259 pState->fIntMaskUsed = false;
6260 pState->fDelayInts = false;
6261 pState->fLocked = false;
6262 pState->u64AckedAt = 0;
6263#ifdef E1K_WITH_TXD_CACHE
6264 pState->nTxDFetched = 0;
6265 pState->iTxDCurrent = 0;
6266 pState->fGSO = false;
6267 pState->cbTxAlloc = 0;
6268#endif /* E1K_WITH_TXD_CACHE */
6269 e1kHardReset(pState);
6270}
6271
6272/**
6273 * @copydoc FNPDMDEVSUSPEND
6274 */
6275static DECLCALLBACK(void) e1kSuspend(PPDMDEVINS pDevIns)
6276{
6277 /* Poke thread waiting for buffer space. */
6278 e1kWakeupReceive(pDevIns);
6279}
6280
6281/**
6282 * Device relocation callback.
6283 *
6284 * When this callback is called, the device instance data (and, if the
6285 * device has a GC component, its GC data) is being relocated and/or the
6286 * selectors have been changed. The device must use the chance to perform the
6287 * necessary pointer relocations and data updates.
6288 *
6289 * Before the GC code is executed the first time, this function will be
6290 * called with a 0 delta so GC pointer calculations can be done in one place.
6291 *
6292 * @param pDevIns Pointer to the device instance.
6293 * @param offDelta The relocation delta relative to the old location.
6294 *
6295 * @remark A relocation CANNOT fail.
6296 */
6297static DECLCALLBACK(void) e1kRelocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
6298{
6299 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6300 pState->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
6301 pState->pTxQueueRC = PDMQueueRCPtr(pState->pTxQueueR3);
6302 pState->pCanRxQueueRC = PDMQueueRCPtr(pState->pCanRxQueueR3);
6303#ifdef E1K_USE_RX_TIMERS
6304 pState->pRIDTimerRC = TMTimerRCPtr(pState->pRIDTimerR3);
6305 pState->pRADTimerRC = TMTimerRCPtr(pState->pRADTimerR3);
6306#endif /* E1K_USE_RX_TIMERS */
6307#ifdef E1K_USE_TX_TIMERS
6308 pState->pTIDTimerRC = TMTimerRCPtr(pState->pTIDTimerR3);
6309# ifndef E1K_NO_TAD
6310 pState->pTADTimerRC = TMTimerRCPtr(pState->pTADTimerR3);
6311# endif /* E1K_NO_TAD */
6312#endif /* E1K_USE_TX_TIMERS */
6313 pState->pIntTimerRC = TMTimerRCPtr(pState->pIntTimerR3);
6314 pState->pLUTimerRC = TMTimerRCPtr(pState->pLUTimerR3);
6315}
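
/*
 * All raw-mode pointers above are re-derived from their ring-3 counterparts,
 * so offDelta itself is not needed here. For reference, a minimal sketch of
 * the alternative pattern, adjusting a raw-mode pointer by the delta directly
 * (pSomeBufferRC is a hypothetical member, shown for illustration only):
 *
 *     if (offDelta)
 *         pState->pSomeBufferRC += offDelta;
 */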
6316
6317/**
6318 * Destruct a device instance.
6319 *
6320 * We need to free non-VM resources only.
6321 *
6322 * @returns VBox status.
6323 * @param pDevIns The device instance data.
6324 * @thread EMT
6325 */
6326static DECLCALLBACK(int) e1kDestruct(PPDMDEVINS pDevIns)
6327{
6328 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6329 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
6330
6331 e1kDumpState(pState);
6332 E1kLog(("%s Destroying instance\n", INSTANCE(pState)));
6333 if (PDMCritSectIsInitialized(&pState->cs))
6334 {
6335 if (pState->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
6336 {
6337 RTSemEventSignal(pState->hEventMoreRxDescAvail);
6338 RTSemEventDestroy(pState->hEventMoreRxDescAvail);
6339 pState->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
6340 }
6341 PDMR3CritSectDelete(&pState->csRx);
6342 //PDMR3CritSectDelete(&pState->csTx);
6343 PDMR3CritSectDelete(&pState->cs);
6344 }
6345 return VINF_SUCCESS;
6346}
6347
6348/**
6349 * Status info callback.
6350 *
6351 * @param pDevIns The device instance.
6352 * @param pHlp The output helpers.
6353 * @param pszArgs The arguments.
6354 */
6355static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
6356{
6357 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6358 unsigned i;
6359 // bool fRcvRing = false;
6360 // bool fXmtRing = false;
6361
6362 /*
6363 * Parse args.
6364 if (pszArgs)
6365 {
6366 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
6367 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
6368 }
6369 */
6370
6371 /*
6372 * Show info.
6373 */
6374 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RX32 mac-cfg=%RTmac %s%s%s\n",
6375 pDevIns->iInstance, pState->addrIOPort, pState->addrMMReg,
6376 &pState->macConfigured, g_Chips[pState->eChip].pcszName,
6377 pState->fGCEnabled ? " GC" : "", pState->fR0Enabled ? " R0" : "");
6378
6379 e1kCsEnter(pState, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
6380
6381 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6382 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", s_e1kRegMap[i].abbrev, pState->auRegs[i]);
6383
6384 for (i = 0; i < RT_ELEMENTS(pState->aRecAddr.array); i++)
6385 {
6386 E1KRAELEM* ra = pState->aRecAddr.array + i;
6387 if (ra->ctl & RA_CTL_AV)
6388 {
6389 const char *pcszTmp;
6390 switch (ra->ctl & RA_CTL_AS)
6391 {
6392 case 0: pcszTmp = "DST"; break;
6393 case 1: pcszTmp = "SRC"; break;
6394 default: pcszTmp = "reserved";
6395 }
6396 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
6397 }
6398 }
6399
6400
6401#ifdef E1K_INT_STATS
6402 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pState->uStatIntTry);
6403 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pState->uStatInt);
6404 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pState->uStatIntLower);
6405 pHlp->pfnPrintf(pHlp, "Interrupts delayed: %d\n", pState->uStatIntDly);
6406 pHlp->pfnPrintf(pHlp, "Disabled delayed: %d\n", pState->uStatDisDly);
6407 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pState->uStatIntSkip);
6408 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pState->uStatIntMasked);
6409 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pState->uStatIntEarly);
6410 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pState->uStatIntLate);
6411 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pState->iStatIntLost);
6412 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pState->uStatIntRx);
6413 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pState->uStatIntTx);
6414 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pState->uStatIntICS);
6415 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pState->uStatIntRDTR);
6416 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pState->uStatIntRXDMT0);
6417 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pState->uStatIntTXQE);
6418 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pState->uStatTxIDE);
6419 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pState->uStatTxNoRS);
6420 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pState->uStatTAD);
6421 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pState->uStatTID);
6422 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pState->uStatRAD);
6423 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pState->uStatRID);
6424 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pState->uStatDescCtx);
6425 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pState->uStatDescDat);
6426 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pState->uStatDescLeg);
6427 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pState->uStatRxFrm);
6428 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pState->uStatTxFrm);
6429#endif /* E1K_INT_STATS */
6430
6431 e1kCsLeave(pState);
6432}
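
/*
 * The handler above is registered under the name "e1k<instance>" (see
 * e1kConstruct), so it can be invoked from the VM debugger console roughly
 * like this (instance 0 assumed for illustration):
 *
 *     VBoxDbg> info e1k0
 */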
6433
6434/**
6435 * Sets 8-bit register in PCI configuration space.
6436 * @param refPciDev The PCI device.
6437 * @param uOffset The register offset.
6438 * @param u8Value   The value to store in the register.
6439 * @thread EMT
6440 */
6441DECLINLINE(void) e1kPCICfgSetU8(PCIDEVICE& refPciDev, uint32_t uOffset, uint8_t u8Value)
6442{
6443 Assert(uOffset < sizeof(refPciDev.config));
6444 refPciDev.config[uOffset] = u8Value;
6445}
6446
6447/**
6448 * Sets 16-bit register in PCI configuration space.
6449 * @param refPciDev The PCI device.
6450 * @param uOffset The register offset.
6451 * @param u16Value The value to store in the register.
6452 * @thread EMT
6453 */
6454DECLINLINE(void) e1kPCICfgSetU16(PCIDEVICE& refPciDev, uint32_t uOffset, uint16_t u16Value)
6455{
6456 Assert(uOffset+sizeof(u16Value) <= sizeof(refPciDev.config));
6457 *(uint16_t*)&refPciDev.config[uOffset] = u16Value;
6458}
6459
6460/**
6461 * Sets 32-bit register in PCI configuration space.
6462 * @param refPciDev The PCI device.
6463 * @param uOffset The register offset.
6464 * @param u32Value The value to store in the register.
6465 * @thread EMT
6466 */
6467DECLINLINE(void) e1kPCICfgSetU32(PCIDEVICE& refPciDev, uint32_t uOffset, uint32_t u32Value)
6468{
6469 Assert(uOffset+sizeof(u32Value) <= sizeof(refPciDev.config));
6470 *(uint32_t*)&refPciDev.config[uOffset] = u32Value;
6471}
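
/*
 * Note that the 16-/32-bit helpers above store the value in host byte order,
 * while PCI configuration space is defined as little-endian, so they are only
 * correct on little-endian hosts. A minimal endian-safe sketch, assuming the
 * IPRT byte-order macros (not what this device currently uses):
 *
 *     *(uint16_t*)&refPciDev.config[uOffset] = RT_H2LE_U16(u16Value);
 *     *(uint32_t*)&refPciDev.config[uOffset] = RT_H2LE_U32(u32Value);
 */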
6472
6473/**
6474 * Set PCI configuration space registers.
6475 *
6476 * @param pci Reference to PCI device structure.
6477 * @thread EMT
6478 */
6479static DECLCALLBACK(void) e1kConfigurePCI(PCIDEVICE& pci, E1KCHIP eChip)
6480{
6481 Assert(eChip < RT_ELEMENTS(g_Chips));
6482 /* Configure PCI Device, assume 32-bit mode ******************************/
6483 PCIDevSetVendorId(&pci, g_Chips[eChip].uPCIVendorId);
6484 PCIDevSetDeviceId(&pci, g_Chips[eChip].uPCIDeviceId);
6485 e1kPCICfgSetU16(pci, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_Chips[eChip].uPCISubsystemVendorId);
6486 e1kPCICfgSetU16(pci, VBOX_PCI_SUBSYSTEM_ID, g_Chips[eChip].uPCISubsystemId);
6487
6488 e1kPCICfgSetU16(pci, VBOX_PCI_COMMAND, 0x0000);
6489 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
6490 e1kPCICfgSetU16(pci, VBOX_PCI_STATUS,
6491 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
6492 /* Stepping A2 */
6493 e1kPCICfgSetU8( pci, VBOX_PCI_REVISION_ID, 0x02);
6494 /* Ethernet adapter */
6495 e1kPCICfgSetU8( pci, VBOX_PCI_CLASS_PROG, 0x00);
6496 e1kPCICfgSetU16(pci, VBOX_PCI_CLASS_DEVICE, 0x0200);
6497 /* normal single function Ethernet controller */
6498 e1kPCICfgSetU8( pci, VBOX_PCI_HEADER_TYPE, 0x00);
6499 /* Memory Register Base Address */
6500 e1kPCICfgSetU32(pci, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
6501 /* Memory Flash Base Address */
6502 e1kPCICfgSetU32(pci, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
6503 /* IO Register Base Address */
6504 e1kPCICfgSetU32(pci, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
6505 /* Expansion ROM Base Address */
6506 e1kPCICfgSetU32(pci, VBOX_PCI_ROM_ADDRESS, 0x00000000);
6507 /* Capabilities Pointer */
6508 e1kPCICfgSetU8( pci, VBOX_PCI_CAPABILITY_LIST, 0xDC);
6509 /* Interrupt Pin: INTA# */
6510 e1kPCICfgSetU8( pci, VBOX_PCI_INTERRUPT_PIN, 0x01);
6511 /* Max_Lat/Min_Gnt: very high priority and time slice */
6512 e1kPCICfgSetU8( pci, VBOX_PCI_MIN_GNT, 0xFF);
6513 e1kPCICfgSetU8( pci, VBOX_PCI_MAX_LAT, 0x00);
6514
6515 /* PCI Power Management Registers ****************************************/
6516 /* Capability ID: PCI Power Management Registers */
6517 e1kPCICfgSetU8( pci, 0xDC, VBOX_PCI_CAP_ID_PM);
6518 /* Next Item Pointer: PCI-X */
6519 e1kPCICfgSetU8( pci, 0xDC + 1, 0xE4);
6520 /* Power Management Capabilities: PM disabled, DSI */
6521 e1kPCICfgSetU16(pci, 0xDC + 2,
6522 0x0002 | VBOX_PCI_PM_CAP_DSI);
6523 /* Power Management Control / Status Register: PM disabled */
6524 e1kPCICfgSetU16(pci, 0xDC + 4, 0x0000);
6525 /* PMCSR_BSE Bridge Support Extensions: Not supported */
6526 e1kPCICfgSetU8( pci, 0xDC + 6, 0x00);
6527 /* Data Register: PM disabled, always 0 */
6528 e1kPCICfgSetU8( pci, 0xDC + 7, 0x00);
6529
6530 /* PCI-X Configuration Registers *****************************************/
6531 /* Capability ID: PCI-X Configuration Registers */
6532 e1kPCICfgSetU8( pci, 0xE4, VBOX_PCI_CAP_ID_PCIX);
6533#ifdef E1K_WITH_MSI
6534 e1kPCICfgSetU8( pci, 0xE4 + 1, 0x80);
6535#else
6536 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
6537 e1kPCICfgSetU8( pci, 0xE4 + 1, 0x00);
6538#endif
6539 /* PCI-X Command: Enable Relaxed Ordering */
6540 e1kPCICfgSetU16(pci, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
6541    /* PCI-X Status: 32-bit, 66 MHz */
6542 /// @todo: is this value really correct? fff8 doesn't look like actual PCI address
6543 e1kPCICfgSetU32(pci, 0xE4 + 4, 0x0040FFF8);
6544}
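
/*
 * For reference, the capability chain set up above:
 *
 *     VBOX_PCI_CAPABILITY_LIST (0x34) -> 0xDC  PCI Power Management
 *     0xDC + 1 (next pointer)         -> 0xE4  PCI-X
 *     0xE4 + 1 (next pointer)         -> 0x00  end of list
 *                                        (0x80, i.e. MSI, when E1K_WITH_MSI
 *                                         is defined)
 */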
6545
6546/**
6547 * @interface_method_impl{PDMDEVREG,pfnConstruct}
6548 */
6549static DECLCALLBACK(int) e1kConstruct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
6550{
6551 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6552 int rc;
6553 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
6554
6555 /* Init handles and log related stuff. */
6556 RTStrPrintf(pState->szInstance, sizeof(pState->szInstance), "E1000#%d", iInstance);
6557 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", INSTANCE(pState), sizeof(E1KRXDESC)));
6558 pState->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
6559
6560 /*
6561 * Validate configuration.
6562 */
6563 if (!CFGMR3AreValuesValid(pCfg, "MAC\0" "CableConnected\0" "AdapterType\0"
6564 "LineSpeed\0" "GCEnabled\0" "R0Enabled\0"
6565 "EthernetCRC\0"))
6566 return PDMDEV_SET_ERROR(pDevIns, VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES,
6567 N_("Invalid configuration for E1000 device"));
6568
6569 /** @todo: LineSpeed unused! */
6570
6571 pState->fR0Enabled = true;
6572 pState->fGCEnabled = true;
6573 pState->fEthernetCRC = true;
6574
6575 /* Get config params */
6576 rc = CFGMR3QueryBytes(pCfg, "MAC", pState->macConfigured.au8,
6577 sizeof(pState->macConfigured.au8));
6578 if (RT_FAILURE(rc))
6579 return PDMDEV_SET_ERROR(pDevIns, rc,
6580 N_("Configuration error: Failed to get MAC address"));
6581 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pState->fCableConnected);
6582 if (RT_FAILURE(rc))
6583 return PDMDEV_SET_ERROR(pDevIns, rc,
6584 N_("Configuration error: Failed to get the value of 'CableConnected'"));
6585 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pState->eChip);
6586 if (RT_FAILURE(rc))
6587 return PDMDEV_SET_ERROR(pDevIns, rc,
6588 N_("Configuration error: Failed to get the value of 'AdapterType'"));
6589 Assert(pState->eChip <= E1K_CHIP_82545EM);
6590 rc = CFGMR3QueryBoolDef(pCfg, "GCEnabled", &pState->fGCEnabled, true);
6591 if (RT_FAILURE(rc))
6592 return PDMDEV_SET_ERROR(pDevIns, rc,
6593 N_("Configuration error: Failed to get the value of 'GCEnabled'"));
6594
6595 rc = CFGMR3QueryBoolDef(pCfg, "R0Enabled", &pState->fR0Enabled, true);
6596 if (RT_FAILURE(rc))
6597 return PDMDEV_SET_ERROR(pDevIns, rc,
6598 N_("Configuration error: Failed to get the value of 'R0Enabled'"));
6599
6600 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pState->fEthernetCRC, true);
6601 if (RT_FAILURE(rc))
6602 return PDMDEV_SET_ERROR(pDevIns, rc,
6603 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
6604
6605 E1kLog(("%s Chip=%s\n", INSTANCE(pState), g_Chips[pState->eChip].pcszName));
6606
6607 /* Initialize state structure */
6608 pState->pDevInsR3 = pDevIns;
6609 pState->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
6610 pState->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
6611 pState->u16TxPktLen = 0;
6612 pState->fIPcsum = false;
6613 pState->fTCPcsum = false;
6614 pState->fIntMaskUsed = false;
6615 pState->fDelayInts = false;
6616 pState->fLocked = false;
6617 pState->u64AckedAt = 0;
6618 pState->led.u32Magic = PDMLED_MAGIC;
6619 pState->u32PktNo = 1;
6620
6621#ifdef E1K_INT_STATS
6622 pState->uStatInt = 0;
6623 pState->uStatIntTry = 0;
6624 pState->uStatIntLower = 0;
6625 pState->uStatIntDly = 0;
6626 pState->uStatDisDly = 0;
6627 pState->iStatIntLost = 0;
6628 pState->iStatIntLostOne = 0;
6629 pState->uStatIntLate = 0;
6630 pState->uStatIntMasked = 0;
6631 pState->uStatIntEarly = 0;
6632 pState->uStatIntRx = 0;
6633 pState->uStatIntTx = 0;
6634 pState->uStatIntICS = 0;
6635 pState->uStatIntRDTR = 0;
6636 pState->uStatIntRXDMT0 = 0;
6637 pState->uStatIntTXQE = 0;
6638 pState->uStatTxNoRS = 0;
6639 pState->uStatTxIDE = 0;
6640 pState->uStatTAD = 0;
6641 pState->uStatTID = 0;
6642 pState->uStatRAD = 0;
6643 pState->uStatRID = 0;
6644 pState->uStatRxFrm = 0;
6645 pState->uStatTxFrm = 0;
6646 pState->uStatDescCtx = 0;
6647 pState->uStatDescDat = 0;
6648 pState->uStatDescLeg = 0;
6649#endif /* E1K_INT_STATS */
6650
6651 /* Interfaces */
6652 pState->IBase.pfnQueryInterface = e1kQueryInterface;
6653
6654 pState->INetworkDown.pfnWaitReceiveAvail = e1kNetworkDown_WaitReceiveAvail;
6655 pState->INetworkDown.pfnReceive = e1kNetworkDown_Receive;
6656 pState->INetworkDown.pfnXmitPending = e1kNetworkDown_XmitPending;
6657
6658 pState->ILeds.pfnQueryStatusLed = e1kQueryStatusLed;
6659
6660 pState->INetworkConfig.pfnGetMac = e1kGetMac;
6661 pState->INetworkConfig.pfnGetLinkState = e1kGetLinkState;
6662 pState->INetworkConfig.pfnSetLinkState = e1kSetLinkState;
6663
6664 /* Initialize the EEPROM */
6665 pState->eeprom.init(pState->macConfigured);
6666
6667 /* Initialize internal PHY */
6668 Phy::init(&pState->phy, iInstance,
6669 pState->eChip == E1K_CHIP_82543GC?
6670 PHY_EPID_M881000 : PHY_EPID_M881011);
6671 Phy::setLinkStatus(&pState->phy, pState->fCableConnected);
6672
6673 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
6674 NULL, e1kLiveExec, NULL,
6675 e1kSavePrep, e1kSaveExec, NULL,
6676 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
6677 if (RT_FAILURE(rc))
6678 return rc;
6679
6680 /* Initialize critical section */
6681 rc = PDMDevHlpCritSectInit(pDevIns, &pState->cs, RT_SRC_POS, "%s", pState->szInstance);
6682 if (RT_FAILURE(rc))
6683 return rc;
6684 rc = PDMDevHlpCritSectInit(pDevIns, &pState->csRx, RT_SRC_POS, "%sRX", pState->szInstance);
6685 if (RT_FAILURE(rc))
6686 return rc;
6687
6688 /* Set PCI config registers */
6689 e1kConfigurePCI(pState->pciDevice, pState->eChip);
6690 /* Register PCI device */
6691 rc = PDMDevHlpPCIRegister(pDevIns, &pState->pciDevice);
6692 if (RT_FAILURE(rc))
6693 return rc;
6694
6695#ifdef E1K_WITH_MSI
6696 PDMMSIREG aMsiReg;
6697 aMsiReg.cMsiVectors = 1;
6698 aMsiReg.iMsiCapOffset = 0x80;
6699 aMsiReg.iMsiNextOffset = 0x0;
6700 aMsiReg.fMsi64bit = false;
6701 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg);
6702 AssertRC(rc);
6703 if (RT_FAILURE (rc))
6704 return rc;
6705#endif
6706
6707
6708 /* Map our registers to memory space (region 0, see e1kConfigurePCI)*/
6709 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, E1K_MM_SIZE,
6710 PCI_ADDRESS_SPACE_MEM, e1kMap);
6711 if (RT_FAILURE(rc))
6712 return rc;
6713 /* Map our registers to IO space (region 2, see e1kConfigurePCI) */
6714 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE,
6715 PCI_ADDRESS_SPACE_IO, e1kMap);
6716 if (RT_FAILURE(rc))
6717 return rc;
6718
6719 /* Create transmit queue */
6720 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
6721 e1kTxQueueConsumer, true, "E1000-Xmit", &pState->pTxQueueR3);
6722 if (RT_FAILURE(rc))
6723 return rc;
6724 pState->pTxQueueR0 = PDMQueueR0Ptr(pState->pTxQueueR3);
6725 pState->pTxQueueRC = PDMQueueRCPtr(pState->pTxQueueR3);
6726
6727 /* Create the RX notifier signaller. */
6728 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
6729 e1kCanRxQueueConsumer, true, "E1000-Rcv", &pState->pCanRxQueueR3);
6730 if (RT_FAILURE(rc))
6731 return rc;
6732 pState->pCanRxQueueR0 = PDMQueueR0Ptr(pState->pCanRxQueueR3);
6733 pState->pCanRxQueueRC = PDMQueueRCPtr(pState->pCanRxQueueR3);
6734
6735#ifdef E1K_USE_TX_TIMERS
6736 /* Create Transmit Interrupt Delay Timer */
6737 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxIntDelayTimer, pState,
6738 TMTIMER_FLAGS_NO_CRIT_SECT,
6739 "E1000 Transmit Interrupt Delay Timer", &pState->pTIDTimerR3);
6740 if (RT_FAILURE(rc))
6741 return rc;
6742 pState->pTIDTimerR0 = TMTimerR0Ptr(pState->pTIDTimerR3);
6743 pState->pTIDTimerRC = TMTimerRCPtr(pState->pTIDTimerR3);
6744
6745# ifndef E1K_NO_TAD
6746 /* Create Transmit Absolute Delay Timer */
6747 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxAbsDelayTimer, pState,
6748 TMTIMER_FLAGS_NO_CRIT_SECT,
6749 "E1000 Transmit Absolute Delay Timer", &pState->pTADTimerR3);
6750 if (RT_FAILURE(rc))
6751 return rc;
6752 pState->pTADTimerR0 = TMTimerR0Ptr(pState->pTADTimerR3);
6753 pState->pTADTimerRC = TMTimerRCPtr(pState->pTADTimerR3);
6754# endif /* E1K_NO_TAD */
6755#endif /* E1K_USE_TX_TIMERS */
6756
6757#ifdef E1K_USE_RX_TIMERS
6758 /* Create Receive Interrupt Delay Timer */
6759 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxIntDelayTimer, pState,
6760 TMTIMER_FLAGS_NO_CRIT_SECT,
6761 "E1000 Receive Interrupt Delay Timer", &pState->pRIDTimerR3);
6762 if (RT_FAILURE(rc))
6763 return rc;
6764 pState->pRIDTimerR0 = TMTimerR0Ptr(pState->pRIDTimerR3);
6765 pState->pRIDTimerRC = TMTimerRCPtr(pState->pRIDTimerR3);
6766
6767 /* Create Receive Absolute Delay Timer */
6768 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxAbsDelayTimer, pState,
6769 TMTIMER_FLAGS_NO_CRIT_SECT,
6770 "E1000 Receive Absolute Delay Timer", &pState->pRADTimerR3);
6771 if (RT_FAILURE(rc))
6772 return rc;
6773 pState->pRADTimerR0 = TMTimerR0Ptr(pState->pRADTimerR3);
6774 pState->pRADTimerRC = TMTimerRCPtr(pState->pRADTimerR3);
6775#endif /* E1K_USE_RX_TIMERS */
6776
6777 /* Create Late Interrupt Timer */
6778 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLateIntTimer, pState,
6779 TMTIMER_FLAGS_NO_CRIT_SECT,
6780 "E1000 Late Interrupt Timer", &pState->pIntTimerR3);
6781 if (RT_FAILURE(rc))
6782 return rc;
6783 pState->pIntTimerR0 = TMTimerR0Ptr(pState->pIntTimerR3);
6784 pState->pIntTimerRC = TMTimerRCPtr(pState->pIntTimerR3);
6785
6786 /* Create Link Up Timer */
6787 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pState,
6788 TMTIMER_FLAGS_NO_CRIT_SECT,
6789 "E1000 Link Up Timer", &pState->pLUTimerR3);
6790 if (RT_FAILURE(rc))
6791 return rc;
6792 pState->pLUTimerR0 = TMTimerR0Ptr(pState->pLUTimerR3);
6793 pState->pLUTimerRC = TMTimerRCPtr(pState->pLUTimerR3);
6794
6795 /* Register the info item */
6796 char szTmp[20];
6797 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
6798 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
6799
6800 /* Status driver */
6801 PPDMIBASE pBase;
6802 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pState->IBase, &pBase, "Status Port");
6803 if (RT_FAILURE(rc))
6804 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
6805 pState->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
6806
6807 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pState->IBase, &pState->pDrvBase, "Network Port");
6808 if (RT_SUCCESS(rc))
6809 {
6810 if (rc == VINF_NAT_DNS)
6811 {
6812 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
6813                                       N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays when trying to do so"));
6814 }
6815 pState->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMINETWORKUP);
6816 AssertMsgReturn(pState->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
6817 VERR_PDM_MISSING_INTERFACE_BELOW);
6818
6819 pState->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASER0), PDMINETWORKUP);
6820 pState->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASERC), PDMINETWORKUP);
6821 }
6822 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
6823 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
6824 {
6825 /* No error! */
6826 E1kLog(("%s This adapter is not attached to any network!\n", INSTANCE(pState)));
6827 }
6828 else
6829 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
6830
6831 rc = RTSemEventCreate(&pState->hEventMoreRxDescAvail);
6832 if (RT_FAILURE(rc))
6833 return rc;
6834
6835 e1kHardReset(pState);
6836
6837#if defined(VBOX_WITH_STATISTICS)
6838 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
6839 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
6840 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
6841 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
6842 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
6843 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
6844 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
6845 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
6846 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
6847 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
6848 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
6849 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
6850 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
6851 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
6852 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
6853 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
6854 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
6855 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
6856 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
6857 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
6858#endif /* VBOX_WITH_STATISTICS */
6859 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
6860#if defined(VBOX_WITH_STATISTICS)
6861 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
6862 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
6863#endif /* VBOX_WITH_STATISTICS */
6864 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
6865#if defined(VBOX_WITH_STATISTICS)
6866 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
6867 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
6868
6869 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContexNormal", iInstance);
6870 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
6871 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
6872 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
6873 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
6874 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
6875 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
6876 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
6877 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
6878#endif /* VBOX_WITH_STATISTICS */
6879
6880 return VINF_SUCCESS;
6881}
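
/*
 * For reference, a minimal sketch of the CFGM keys consumed by the
 * constructor above; the tree layout and the values shown are illustrative:
 *
 *     Devices/e1000/0/Config/MAC            (bytes)  080027xxxxxx
 *     Devices/e1000/0/Config/CableConnected (bool)   1
 *     Devices/e1000/0/Config/AdapterType    (u32)    0..2
 *     Devices/e1000/0/Config/GCEnabled      (bool)   1
 *     Devices/e1000/0/Config/R0Enabled      (bool)   1
 *     Devices/e1000/0/Config/EthernetCRC    (bool)   1
 */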
6882
6883/**
6884 * The device registration structure.
6885 */
6886const PDMDEVREG g_DeviceE1000 =
6887{
6888 /* Structure version. PDM_DEVREG_VERSION defines the current version. */
6889 PDM_DEVREG_VERSION,
6890 /* Device name. */
6891 "e1000",
6892 /* Name of guest context module (no path).
6893     * Only evaluated if PDM_DEVREG_FLAGS_RC is set. */
6894 "VBoxDDGC.gc",
6895 /* Name of ring-0 module (no path).
6896     * Only evaluated if PDM_DEVREG_FLAGS_R0 is set. */
6897 "VBoxDDR0.r0",
6898 /* The description of the device. The UTF-8 string pointed to shall, like this structure,
6899 * remain unchanged from registration till VM destruction. */
6900 "Intel PRO/1000 MT Desktop Ethernet.\n",
6901
6902 /* Flags, combination of the PDM_DEVREG_FLAGS_* \#defines. */
6903 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
6904 /* Device class(es), combination of the PDM_DEVREG_CLASS_* \#defines. */
6905 PDM_DEVREG_CLASS_NETWORK,
6906 /* Maximum number of instances (per VM). */
6907 ~0U,
6908 /* Size of the instance data. */
6909 sizeof(E1KSTATE),
6910
6911 /* Construct instance - required. */
6912 e1kConstruct,
6913 /* Destruct instance - optional. */
6914 e1kDestruct,
6915 /* Relocation command - optional. */
6916 e1kRelocate,
6917 /* I/O Control interface - optional. */
6918 NULL,
6919 /* Power on notification - optional. */
6920 NULL,
6921 /* Reset notification - optional. */
6922 e1kReset,
6923 /* Suspend notification - optional. */
6924 e1kSuspend,
6925 /* Resume notification - optional. */
6926 NULL,
6927 /* Attach command - optional. */
6928 e1kAttach,
6929 /* Detach notification - optional. */
6930 e1kDetach,
6931 /* Query a LUN base interface - optional. */
6932 NULL,
6933 /* Init complete notification - optional. */
6934 NULL,
6935 /* Power off notification - optional. */
6936 e1kPowerOff,
6937 /* pfnSoftReset */
6938 NULL,
6939 /* u32VersionEnd */
6940 PDM_DEVREG_VERSION
6941};
6942
6943#endif /* IN_RING3 */
6944#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */