VirtualBox

source: vbox/trunk/src/VBox/Devices/Network/DevE1000.cpp@41808

Last change on this file since 41808 was 41808, checked in by vboxsync, 12 years ago

e1000: fixed assertion in debug build (#5582)

1/* $Id: DevE1000.cpp 41808 2012-06-18 06:11:24Z vboxsync $ */
2/** @file
3 * DevE1000 - Intel 82540EM Ethernet Controller Emulation.
4 *
5 * Implemented in accordance with the specification:
6 *
7 * PCI/PCI-X Family of Gigabit Ethernet Controllers Software Developer's Manual
8 * 82540EP/EM, 82541xx, 82544GC/EI, 82545GM/EM, 82546GB/EB, and 82547xx
9 *
10 * 317453-002 Revision 3.5
11 *
12 * @todo IPv6 checksum offloading support
13 * @todo Flexible Filter / Wakeup (optional?)
14 */
15
16/*
17 * Copyright (C) 2007-2011 Oracle Corporation
18 *
19 * This file is part of VirtualBox Open Source Edition (OSE), as
20 * available from http://www.virtualbox.org. This file is free software;
21 * you can redistribute it and/or modify it under the terms of the GNU
22 * General Public License (GPL) as published by the Free Software
23 * Foundation, in version 2 as it comes in the "COPYING" file of the
24 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
25 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
26 */
27
28#define LOG_GROUP LOG_GROUP_DEV_E1000
29
30//#define E1kLogRel(a) LogRel(a)
31#define E1kLogRel(a)
32
33/* Options *******************************************************************/
34/*
35 * E1K_INIT_RA0 forces E1000 to set the first entry in Receive Address filter
36 * table to MAC address obtained from CFGM. Most guests read MAC address from
37 * EEPROM and write it to RA[0] explicitly, but Mac OS X seems to depend on it
38 * being already set (see @bugref{4657}).
39 */
40#define E1K_INIT_RA0
41/*
42 * E1K_LSC_ON_SLU causes E1000 to generate Link Status Change interrupt when
43 * the guest driver brings up the link via the CTRL.SLU bit. Again the only guest
44 * that requires it is Mac OS X (see @bugref{4657}).
45 */
46#define E1K_LSC_ON_SLU
47/*
48 * E1K_ITR_ENABLED reduces the number of interrupts generated by E1000 if a
49 * guest driver requested it by writing non-zero value to the Interrupt
50 * Throttling Register (see section 13.4.18 in "8254x Family of Gigabit
51 * Ethernet Controllers Software Developer’s Manual").
52 */
53#define E1K_ITR_ENABLED
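/*
 * Illustrative arithmetic, not part of the original sources: per the section
 * referenced above, the ITR value is expressed in 256 ns increments, so a
 * guest writing, say, 1000 asks for at most one interrupt per ~256 us.
 */
static inline unsigned long long e1kDemoItrToNanoseconds(unsigned uItr)
{
    return (unsigned long long)uItr * 256; /* minimal gap between interrupts, in ns */
}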
54/*
55 * E1K_USE_TX_TIMERS aims to reduce the number of generated TX interrupts if a
56 * guest driver sets the delays via the Transmit Interrupt Delay Value (TIDV)
57 * register. Enabling it showed no positive effects on existing guests so it
58 * stays disabled. See sections 3.2.7.1 and 3.4.3.1 in "8254x Family of Gigabit
59 * Ethernet Controllers Software Developer’s Manual" for more detailed
60 * explanation.
61 */
62//#define E1K_USE_TX_TIMERS
63/*
64 * E1K_NO_TAD disables one of two timers enabled by E1K_USE_TX_TIMERS, the
65 * Transmit Absolute Delay time. This timer sets the maximum time interval
66 * during which TX interrupts can be postponed (delayed). It has no effect
67 * if E1K_USE_TX_TIMERS is not defined.
68 */
69//#define E1K_NO_TAD
70/*
71 * E1K_REL_DEBUG enables debug logging (E1kLog, E1kLog2, E1kLog3) in the release build.
72 */
73//#define E1K_REL_DEBUG
74/*
75 * E1K_INT_STATS enables collection of internal statistics used for
76 * debugging of delayed interrupts, etc.
77 */
78//#define E1K_INT_STATS
79/*
80 * E1K_WITH_MSI enables rudimentary MSI support. Not implemented.
81 */
82//#define E1K_WITH_MSI
83/*
84 * E1K_WITH_TX_CS protects e1kXmitPending with a critical section.
85 */
86#define E1K_WITH_TX_CS
87/*
88 * E1K_WITH_TXD_CACHE causes E1000 to fetch multiple TX descriptors in a
89 * single physical memory read (or two if it wraps around the end of TX
90 * descriptor ring). It is required for proper functioning of bandwidth
91 * resource control as it allows computing the exact sizes of packets prior
92 * to allocating their buffers (see @bugref{5582}).
93 */
94#define E1K_WITH_TXD_CACHE
95/*
96 * E1K_WITH_RXD_CACHE causes E1000 to fetch multiple RX descriptors in a
97 * single physical memory read (or two if it wraps around the end of RX
98 * descriptor ring). Intel's packet driver for DOS needs this option in
99 * order to work properly (see @bugref{6217}).
100 */
101#define E1K_WITH_RXD_CACHE
102/* End of Options ************************************************************/
103
104#ifdef E1K_WITH_TXD_CACHE
105/*
106 * E1K_TXD_CACHE_SIZE specifies the maximum number of TX descriptors stored
107 * in the state structure. It limits the number of descriptors loaded in one
108 * batch read. For example, a Linux guest may use up to 20 descriptors per
109 * TSE packet.
110 */
111#define E1K_TXD_CACHE_SIZE 32u
112#endif /* E1K_WITH_TXD_CACHE */
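/*
 * Illustrative sketch, not part of the original sources: the batched
 * descriptor fetch described above needs at most two physical reads, the
 * first covering the descriptors up to the end of the ring and the second,
 * if anything is left, starting over at index 0.
 */
static inline unsigned e1kDemoFirstReadChunk(unsigned iFirst, unsigned cToFetch, unsigned cTotal)
{
    /* Descriptors that fit before the ring wraps; the remainder is fetched
       by a second read beginning at descriptor 0. */
    return iFirst + cToFetch <= cTotal ? cToFetch : cTotal - iFirst;
}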
113
114#ifdef E1K_WITH_RXD_CACHE
115/*
116 * E1K_RXD_CACHE_SIZE specifies the maximum number of RX descriptors stored
117 * in the state structure. It limits the number of descriptors loaded in one
118 * batch read. For example, an XP guest adds 15 RX descriptors at a time.
119 */
120#define E1K_RXD_CACHE_SIZE 16u
121#endif /* E1K_WITH_RXD_CACHE */
122
123#include <iprt/crc.h>
124#include <iprt/ctype.h>
125#include <iprt/net.h>
126#include <iprt/semaphore.h>
127#include <iprt/string.h>
128#include <iprt/uuid.h>
129#include <VBox/vmm/pdmdev.h>
130#include <VBox/vmm/pdmnetifs.h>
131#include <VBox/vmm/pdmnetinline.h>
132#include <VBox/param.h>
133#include "VBoxDD.h"
134
135#include "DevEEPROM.h"
136#include "DevE1000Phy.h"
137
138/* Little helpers ************************************************************/
139#undef htons
140#undef ntohs
141#undef htonl
142#undef ntohl
143#define htons(x) ((((x) & 0xff00) >> 8) | (((x) & 0x00ff) << 8))
144#define ntohs(x) htons(x)
145#define htonl(x) ASMByteSwapU32(x)
146#define ntohl(x) htonl(x)
147
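/*
 * Illustrative compile-time check, not part of the original sources: the
 * local htons/ntohs above must swap the two bytes of a 16-bit value, just
 * like their standard namesakes do on a little-endian host.
 */
AssertCompile(htons(0x1234) == 0x3412);
AssertCompile(ntohs(0x3412) == 0x1234);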
148#ifndef DEBUG
149# ifdef E1K_REL_DEBUG
150# define DEBUG
151# define E1kLog(a) LogRel(a)
152# define E1kLog2(a) LogRel(a)
153# define E1kLog3(a) LogRel(a)
154# define E1kLogX(x, a) LogRel(a)
155//# define E1kLog3(a) do {} while (0)
156# else
157# define E1kLog(a) do {} while (0)
158# define E1kLog2(a) do {} while (0)
159# define E1kLog3(a) do {} while (0)
160# define E1kLogX(x, a) do {} while (0)
161# endif
162#else
163# define E1kLog(a) Log(a)
164# define E1kLog2(a) Log2(a)
165# define E1kLog3(a) Log3(a)
166# define E1kLogX(x, a) LogIt(LOG_INSTANCE, x, LOG_GROUP, a)
167//# define E1kLog(a) do {} while (0)
168//# define E1kLog2(a) do {} while (0)
169//# define E1kLog3(a) do {} while (0)
170#endif
171
172//#undef DEBUG
173
174#define INSTANCE(pState) pState->szInstance
175#define STATE_TO_DEVINS(pState) (((E1KSTATE *)pState)->CTX_SUFF(pDevIns))
176#define E1K_RELOCATE(p, o) *(RTHCUINTPTR *)&p += o
177
178#define E1K_INC_CNT32(cnt) \
179do { \
180 if (cnt < UINT32_MAX) \
181 cnt++; \
182} while (0)
183
184#define E1K_ADD_CNT64(cntLo, cntHi, val) \
185do { \
186 uint64_t u64Cnt = RT_MAKE_U64(cntLo, cntHi); \
187 uint64_t tmp = u64Cnt; \
188 u64Cnt += val; \
189 if (tmp > u64Cnt ) \
190 u64Cnt = UINT64_MAX; \
191 cntLo = (uint32_t)u64Cnt; \
192 cntHi = (uint32_t)(u64Cnt >> 32); \
193} while (0)
194
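/*
 * Illustrative use of the helpers above, not part of the original sources:
 * a 64-bit statistics counter split over a Lo/Hi register pair (GORCL/GORCH,
 * TORL/TORH, ...) is advanced like this and saturates instead of wrapping.
 */
DECLINLINE(void) e1kDemoAddOctets(uint32_t &cntLo, uint32_t &cntHi, uint32_t cbFrame)
{
    E1K_ADD_CNT64(cntLo, cntHi, cbFrame); /* clamps the combined value at UINT64_MAX */
}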
195#ifdef E1K_INT_STATS
196# define E1K_INC_ISTAT_CNT(cnt) ++cnt
197#else /* E1K_INT_STATS */
198# define E1K_INC_ISTAT_CNT(cnt)
199#endif /* E1K_INT_STATS */
200
201
202/*****************************************************************************/
203
204typedef uint32_t E1KCHIP;
205#define E1K_CHIP_82540EM 0
206#define E1K_CHIP_82543GC 1
207#define E1K_CHIP_82545EM 2
208
209struct E1kChips
210{
211 uint16_t uPCIVendorId;
212 uint16_t uPCIDeviceId;
213 uint16_t uPCISubsystemVendorId;
214 uint16_t uPCISubsystemId;
215 const char *pcszName;
216} g_Chips[] =
217{
218 /* Vendor Device SSVendor SubSys Name */
219 { 0x8086,
220 /* Temporary code, as MSI-aware drivers dislike 0x100E. How to do that right? */
221#ifdef E1K_WITH_MSI
222 0x105E,
223#else
224 0x100E,
225#endif
226 0x8086, 0x001E, "82540EM" }, /* Intel 82540EM-A in Intel PRO/1000 MT Desktop */
227 { 0x8086, 0x1004, 0x8086, 0x1004, "82543GC" }, /* Intel 82543GC in Intel PRO/1000 T Server */
228 { 0x8086, 0x100F, 0x15AD, 0x0750, "82545EM" } /* Intel 82545EM-A in VMWare Network Adapter */
229};
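/*
 * Illustrative lookup, not part of the original sources: the E1KCHIP value
 * kept in the device state doubles as an index into g_Chips, so resolving
 * the human-readable adapter name is a plain array access.
 */
DECLINLINE(const char *) e1kDemoChipName(E1KCHIP eChip)
{
    return eChip < RT_ELEMENTS(g_Chips) ? g_Chips[eChip].pcszName : "<unknown>";
}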
230
231
232/* The size of register area mapped to I/O space */
233#define E1K_IOPORT_SIZE 0x8
234/* The size of memory-mapped register area */
235#define E1K_MM_SIZE 0x20000
236
237#define E1K_MAX_TX_PKT_SIZE 16288
238#define E1K_MAX_RX_PKT_SIZE 16384
239
240/*****************************************************************************/
241
242/** Gets the specified bits from the register. */
243#define GET_BITS(reg, bits) ((reg & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
244#define GET_BITS_V(val, reg, bits) ((val & reg##_##bits##_MASK) >> reg##_##bits##_SHIFT)
245#define BITS(reg, bits, bitval) (bitval << reg##_##bits##_SHIFT)
246#define SET_BITS(reg, bits, bitval) do { reg = (reg & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
247#define SET_BITS_V(val, reg, bits, bitval) do { val = (val & ~reg##_##bits##_MASK) | (bitval << reg##_##bits##_SHIFT); } while (0)
248
249#define CTRL_SLU 0x00000040
250#define CTRL_MDIO 0x00100000
251#define CTRL_MDC 0x00200000
252#define CTRL_MDIO_DIR 0x01000000
253#define CTRL_MDC_DIR 0x02000000
254#define CTRL_RESET 0x04000000
255#define CTRL_VME 0x40000000
256
257#define STATUS_LU 0x00000002
258#define STATUS_TXOFF 0x00000010
259
260#define EECD_EE_WIRES 0x0F
261#define EECD_EE_REQ 0x40
262#define EECD_EE_GNT 0x80
263
264#define EERD_START 0x00000001
265#define EERD_DONE 0x00000010
266#define EERD_DATA_MASK 0xFFFF0000
267#define EERD_DATA_SHIFT 16
268#define EERD_ADDR_MASK 0x0000FF00
269#define EERD_ADDR_SHIFT 8
270
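/*
 * Illustrative compile-time check, not part of the original sources: the
 * token-pasting accessors defined earlier glue <REG>_<FIELD>_MASK/_SHIFT
 * together, so reading the EEPROM word address out of an EERD value of
 * 0x00000A00 yields 0x0A.
 */
AssertCompile(GET_BITS_V(0x00000A00, EERD, ADDR) == 0x0A);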
271#define MDIC_DATA_MASK 0x0000FFFF
272#define MDIC_DATA_SHIFT 0
273#define MDIC_REG_MASK 0x001F0000
274#define MDIC_REG_SHIFT 16
275#define MDIC_PHY_MASK 0x03E00000
276#define MDIC_PHY_SHIFT 21
277#define MDIC_OP_WRITE 0x04000000
278#define MDIC_OP_READ 0x08000000
279#define MDIC_READY 0x10000000
280#define MDIC_INT_EN 0x20000000
281#define MDIC_ERROR 0x40000000
282
283#define TCTL_EN 0x00000002
284#define TCTL_PSP 0x00000008
285
286#define RCTL_EN 0x00000002
287#define RCTL_UPE 0x00000008
288#define RCTL_MPE 0x00000010
289#define RCTL_LPE 0x00000020
290#define RCTL_LBM_MASK 0x000000C0
291#define RCTL_LBM_SHIFT 6
292#define RCTL_RDMTS_MASK 0x00000300
293#define RCTL_RDMTS_SHIFT 8
294#define RCTL_LBM_TCVR 3 /**< PHY or external SerDes loopback. */
295#define RCTL_MO_MASK 0x00003000
296#define RCTL_MO_SHIFT 12
297#define RCTL_BAM 0x00008000
298#define RCTL_BSIZE_MASK 0x00030000
299#define RCTL_BSIZE_SHIFT 16
300#define RCTL_VFE 0x00040000
301#define RCTL_CFIEN 0x00080000
302#define RCTL_CFI 0x00100000
303#define RCTL_BSEX 0x02000000
304#define RCTL_SECRC 0x04000000
305
306#define ICR_TXDW 0x00000001
307#define ICR_TXQE 0x00000002
308#define ICR_LSC 0x00000004
309#define ICR_RXDMT0 0x00000010
310#define ICR_RXT0 0x00000080
311#define ICR_TXD_LOW 0x00008000
312#define RDTR_FPD 0x80000000
313
314#define PBA_st ((PBAST*)(pState->auRegs + PBA_IDX))
315typedef struct
316{
317 unsigned rxa : 7;
318 unsigned rxa_r : 9;
319 unsigned txa : 16;
320} PBAST;
321AssertCompileSize(PBAST, 4);
322
323#define TXDCTL_WTHRESH_MASK 0x003F0000
324#define TXDCTL_WTHRESH_SHIFT 16
325#define TXDCTL_LWTHRESH_MASK 0xFE000000
326#define TXDCTL_LWTHRESH_SHIFT 25
327
328#define RXCSUM_PCSS_MASK 0x000000FF
329#define RXCSUM_PCSS_SHIFT 0
330
331/* Register access macros ****************************************************/
332#define CTRL pState->auRegs[CTRL_IDX]
333#define STATUS pState->auRegs[STATUS_IDX]
334#define EECD pState->auRegs[EECD_IDX]
335#define EERD pState->auRegs[EERD_IDX]
336#define CTRL_EXT pState->auRegs[CTRL_EXT_IDX]
337#define FLA pState->auRegs[FLA_IDX]
338#define MDIC pState->auRegs[MDIC_IDX]
339#define FCAL pState->auRegs[FCAL_IDX]
340#define FCAH pState->auRegs[FCAH_IDX]
341#define FCT pState->auRegs[FCT_IDX]
342#define VET pState->auRegs[VET_IDX]
343#define ICR pState->auRegs[ICR_IDX]
344#define ITR pState->auRegs[ITR_IDX]
345#define ICS pState->auRegs[ICS_IDX]
346#define IMS pState->auRegs[IMS_IDX]
347#define IMC pState->auRegs[IMC_IDX]
348#define RCTL pState->auRegs[RCTL_IDX]
349#define FCTTV pState->auRegs[FCTTV_IDX]
350#define TXCW pState->auRegs[TXCW_IDX]
351#define RXCW pState->auRegs[RXCW_IDX]
352#define TCTL pState->auRegs[TCTL_IDX]
353#define TIPG pState->auRegs[TIPG_IDX]
354#define AIFS pState->auRegs[AIFS_IDX]
355#define LEDCTL pState->auRegs[LEDCTL_IDX]
356#define PBA pState->auRegs[PBA_IDX]
357#define FCRTL pState->auRegs[FCRTL_IDX]
358#define FCRTH pState->auRegs[FCRTH_IDX]
359#define RDFH pState->auRegs[RDFH_IDX]
360#define RDFT pState->auRegs[RDFT_IDX]
361#define RDFHS pState->auRegs[RDFHS_IDX]
362#define RDFTS pState->auRegs[RDFTS_IDX]
363#define RDFPC pState->auRegs[RDFPC_IDX]
364#define RDBAL pState->auRegs[RDBAL_IDX]
365#define RDBAH pState->auRegs[RDBAH_IDX]
366#define RDLEN pState->auRegs[RDLEN_IDX]
367#define RDH pState->auRegs[RDH_IDX]
368#define RDT pState->auRegs[RDT_IDX]
369#define RDTR pState->auRegs[RDTR_IDX]
370#define RXDCTL pState->auRegs[RXDCTL_IDX]
371#define RADV pState->auRegs[RADV_IDX]
372#define RSRPD pState->auRegs[RSRPD_IDX]
373#define TXDMAC pState->auRegs[TXDMAC_IDX]
374#define TDFH pState->auRegs[TDFH_IDX]
375#define TDFT pState->auRegs[TDFT_IDX]
376#define TDFHS pState->auRegs[TDFHS_IDX]
377#define TDFTS pState->auRegs[TDFTS_IDX]
378#define TDFPC pState->auRegs[TDFPC_IDX]
379#define TDBAL pState->auRegs[TDBAL_IDX]
380#define TDBAH pState->auRegs[TDBAH_IDX]
381#define TDLEN pState->auRegs[TDLEN_IDX]
382#define TDH pState->auRegs[TDH_IDX]
383#define TDT pState->auRegs[TDT_IDX]
384#define TIDV pState->auRegs[TIDV_IDX]
385#define TXDCTL pState->auRegs[TXDCTL_IDX]
386#define TADV pState->auRegs[TADV_IDX]
387#define TSPMT pState->auRegs[TSPMT_IDX]
388#define CRCERRS pState->auRegs[CRCERRS_IDX]
389#define ALGNERRC pState->auRegs[ALGNERRC_IDX]
390#define SYMERRS pState->auRegs[SYMERRS_IDX]
391#define RXERRC pState->auRegs[RXERRC_IDX]
392#define MPC pState->auRegs[MPC_IDX]
393#define SCC pState->auRegs[SCC_IDX]
394#define ECOL pState->auRegs[ECOL_IDX]
395#define MCC pState->auRegs[MCC_IDX]
396#define LATECOL pState->auRegs[LATECOL_IDX]
397#define COLC pState->auRegs[COLC_IDX]
398#define DC pState->auRegs[DC_IDX]
399#define TNCRS pState->auRegs[TNCRS_IDX]
400#define SEC pState->auRegs[SEC_IDX]
401#define CEXTERR pState->auRegs[CEXTERR_IDX]
402#define RLEC pState->auRegs[RLEC_IDX]
403#define XONRXC pState->auRegs[XONRXC_IDX]
404#define XONTXC pState->auRegs[XONTXC_IDX]
405#define XOFFRXC pState->auRegs[XOFFRXC_IDX]
406#define XOFFTXC pState->auRegs[XOFFTXC_IDX]
407#define FCRUC pState->auRegs[FCRUC_IDX]
408#define PRC64 pState->auRegs[PRC64_IDX]
409#define PRC127 pState->auRegs[PRC127_IDX]
410#define PRC255 pState->auRegs[PRC255_IDX]
411#define PRC511 pState->auRegs[PRC511_IDX]
412#define PRC1023 pState->auRegs[PRC1023_IDX]
413#define PRC1522 pState->auRegs[PRC1522_IDX]
414#define GPRC pState->auRegs[GPRC_IDX]
415#define BPRC pState->auRegs[BPRC_IDX]
416#define MPRC pState->auRegs[MPRC_IDX]
417#define GPTC pState->auRegs[GPTC_IDX]
418#define GORCL pState->auRegs[GORCL_IDX]
419#define GORCH pState->auRegs[GORCH_IDX]
420#define GOTCL pState->auRegs[GOTCL_IDX]
421#define GOTCH pState->auRegs[GOTCH_IDX]
422#define RNBC pState->auRegs[RNBC_IDX]
423#define RUC pState->auRegs[RUC_IDX]
424#define RFC pState->auRegs[RFC_IDX]
425#define ROC pState->auRegs[ROC_IDX]
426#define RJC pState->auRegs[RJC_IDX]
427#define MGTPRC pState->auRegs[MGTPRC_IDX]
428#define MGTPDC pState->auRegs[MGTPDC_IDX]
429#define MGTPTC pState->auRegs[MGTPTC_IDX]
430#define TORL pState->auRegs[TORL_IDX]
431#define TORH pState->auRegs[TORH_IDX]
432#define TOTL pState->auRegs[TOTL_IDX]
433#define TOTH pState->auRegs[TOTH_IDX]
434#define TPR pState->auRegs[TPR_IDX]
435#define TPT pState->auRegs[TPT_IDX]
436#define PTC64 pState->auRegs[PTC64_IDX]
437#define PTC127 pState->auRegs[PTC127_IDX]
438#define PTC255 pState->auRegs[PTC255_IDX]
439#define PTC511 pState->auRegs[PTC511_IDX]
440#define PTC1023 pState->auRegs[PTC1023_IDX]
441#define PTC1522 pState->auRegs[PTC1522_IDX]
442#define MPTC pState->auRegs[MPTC_IDX]
443#define BPTC pState->auRegs[BPTC_IDX]
444#define TSCTC pState->auRegs[TSCTC_IDX]
445#define TSCTFC pState->auRegs[TSCTFC_IDX]
446#define RXCSUM pState->auRegs[RXCSUM_IDX]
447#define WUC pState->auRegs[WUC_IDX]
448#define WUFC pState->auRegs[WUFC_IDX]
449#define WUS pState->auRegs[WUS_IDX]
450#define MANC pState->auRegs[MANC_IDX]
451#define IPAV pState->auRegs[IPAV_IDX]
452#define WUPL pState->auRegs[WUPL_IDX]
453
454/**
455 * Indices of memory-mapped registers in register table
456 */
457typedef enum
458{
459 CTRL_IDX,
460 STATUS_IDX,
461 EECD_IDX,
462 EERD_IDX,
463 CTRL_EXT_IDX,
464 FLA_IDX,
465 MDIC_IDX,
466 FCAL_IDX,
467 FCAH_IDX,
468 FCT_IDX,
469 VET_IDX,
470 ICR_IDX,
471 ITR_IDX,
472 ICS_IDX,
473 IMS_IDX,
474 IMC_IDX,
475 RCTL_IDX,
476 FCTTV_IDX,
477 TXCW_IDX,
478 RXCW_IDX,
479 TCTL_IDX,
480 TIPG_IDX,
481 AIFS_IDX,
482 LEDCTL_IDX,
483 PBA_IDX,
484 FCRTL_IDX,
485 FCRTH_IDX,
486 RDFH_IDX,
487 RDFT_IDX,
488 RDFHS_IDX,
489 RDFTS_IDX,
490 RDFPC_IDX,
491 RDBAL_IDX,
492 RDBAH_IDX,
493 RDLEN_IDX,
494 RDH_IDX,
495 RDT_IDX,
496 RDTR_IDX,
497 RXDCTL_IDX,
498 RADV_IDX,
499 RSRPD_IDX,
500 TXDMAC_IDX,
501 TDFH_IDX,
502 TDFT_IDX,
503 TDFHS_IDX,
504 TDFTS_IDX,
505 TDFPC_IDX,
506 TDBAL_IDX,
507 TDBAH_IDX,
508 TDLEN_IDX,
509 TDH_IDX,
510 TDT_IDX,
511 TIDV_IDX,
512 TXDCTL_IDX,
513 TADV_IDX,
514 TSPMT_IDX,
515 CRCERRS_IDX,
516 ALGNERRC_IDX,
517 SYMERRS_IDX,
518 RXERRC_IDX,
519 MPC_IDX,
520 SCC_IDX,
521 ECOL_IDX,
522 MCC_IDX,
523 LATECOL_IDX,
524 COLC_IDX,
525 DC_IDX,
526 TNCRS_IDX,
527 SEC_IDX,
528 CEXTERR_IDX,
529 RLEC_IDX,
530 XONRXC_IDX,
531 XONTXC_IDX,
532 XOFFRXC_IDX,
533 XOFFTXC_IDX,
534 FCRUC_IDX,
535 PRC64_IDX,
536 PRC127_IDX,
537 PRC255_IDX,
538 PRC511_IDX,
539 PRC1023_IDX,
540 PRC1522_IDX,
541 GPRC_IDX,
542 BPRC_IDX,
543 MPRC_IDX,
544 GPTC_IDX,
545 GORCL_IDX,
546 GORCH_IDX,
547 GOTCL_IDX,
548 GOTCH_IDX,
549 RNBC_IDX,
550 RUC_IDX,
551 RFC_IDX,
552 ROC_IDX,
553 RJC_IDX,
554 MGTPRC_IDX,
555 MGTPDC_IDX,
556 MGTPTC_IDX,
557 TORL_IDX,
558 TORH_IDX,
559 TOTL_IDX,
560 TOTH_IDX,
561 TPR_IDX,
562 TPT_IDX,
563 PTC64_IDX,
564 PTC127_IDX,
565 PTC255_IDX,
566 PTC511_IDX,
567 PTC1023_IDX,
568 PTC1522_IDX,
569 MPTC_IDX,
570 BPTC_IDX,
571 TSCTC_IDX,
572 TSCTFC_IDX,
573 RXCSUM_IDX,
574 WUC_IDX,
575 WUFC_IDX,
576 WUS_IDX,
577 MANC_IDX,
578 IPAV_IDX,
579 WUPL_IDX,
580 MTA_IDX,
581 RA_IDX,
582 VFTA_IDX,
583 IP4AT_IDX,
584 IP6AT_IDX,
585 WUPM_IDX,
586 FFLT_IDX,
587 FFMT_IDX,
588 FFVT_IDX,
589 PBM_IDX,
590 RA_82542_IDX,
591 MTA_82542_IDX,
592 VFTA_82542_IDX,
593 E1K_NUM_OF_REGS
594} E1kRegIndex;
595
596#define E1K_NUM_OF_32BIT_REGS MTA_IDX
597
598
599/**
600 * Define E1000-specific EEPROM layout.
601 */
602class E1kEEPROM
603{
604 public:
605 EEPROM93C46 eeprom;
606
607#ifdef IN_RING3
608 /**
609 * Initialize EEPROM content.
610 *
611 * @param macAddr MAC address of E1000.
612 */
613 void init(RTMAC &macAddr)
614 {
615 eeprom.init();
616 memcpy(eeprom.m_au16Data, macAddr.au16, sizeof(macAddr.au16));
617 eeprom.m_au16Data[0x04] = 0xFFFF;
618 /*
619 * bit 3 - full support for power management
620 * bit 10 - full duplex
621 */
622 eeprom.m_au16Data[0x0A] = 0x4408;
623 eeprom.m_au16Data[0x0B] = 0x001E;
624 eeprom.m_au16Data[0x0C] = 0x8086;
625 eeprom.m_au16Data[0x0D] = 0x100E;
626 eeprom.m_au16Data[0x0E] = 0x8086;
627 eeprom.m_au16Data[0x0F] = 0x3040;
628 eeprom.m_au16Data[0x21] = 0x7061;
629 eeprom.m_au16Data[0x22] = 0x280C;
630 eeprom.m_au16Data[0x23] = 0x00C8;
631 eeprom.m_au16Data[0x24] = 0x00C8;
632 eeprom.m_au16Data[0x2F] = 0x0602;
633 updateChecksum();
634 };
635
636 /**
637 * Compute the checksum as required by E1000 and store it
638 * in the last word.
639 */
640 void updateChecksum()
641 {
642 uint16_t u16Checksum = 0;
643
644 for (int i = 0; i < eeprom.SIZE-1; i++)
645 u16Checksum += eeprom.m_au16Data[i];
646 eeprom.m_au16Data[eeprom.SIZE-1] = 0xBABA - u16Checksum;
647 };
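    /**
     * Illustrative counterpart to updateChecksum(), not part of the original
     * sources: an EEPROM image is considered valid when all words, including
     * the checksum word itself, sum up to 0xBABA (mod 2^16).
     */
    bool isChecksumValidDemo()
    {
        uint16_t u16Sum = 0;
        for (int i = 0; i < eeprom.SIZE; i++)
            u16Sum += eeprom.m_au16Data[i];
        return u16Sum == 0xBABA;
    };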
648
649 /**
650 * First 6 bytes of EEPROM contain MAC address.
651 *
652 * @returns MAC address of E1000.
653 */
654 void getMac(PRTMAC pMac)
655 {
656 memcpy(pMac->au16, eeprom.m_au16Data, sizeof(pMac->au16));
657 };
658
659 uint32_t read()
660 {
661 return eeprom.read();
662 }
663
664 void write(uint32_t u32Wires)
665 {
666 eeprom.write(u32Wires);
667 }
668
669 bool readWord(uint32_t u32Addr, uint16_t *pu16Value)
670 {
671 return eeprom.readWord(u32Addr, pu16Value);
672 }
673
674 int load(PSSMHANDLE pSSM)
675 {
676 return eeprom.load(pSSM);
677 }
678
679 void save(PSSMHANDLE pSSM)
680 {
681 eeprom.save(pSSM);
682 }
683#endif /* IN_RING3 */
684};
685
686
687#define E1K_SPEC_VLAN(s) (s & 0xFFF)
688#define E1K_SPEC_CFI(s) (!!((s>>12) & 0x1))
689#define E1K_SPEC_PRI(s) ((s>>13) & 0x7)
690
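/*
 * Illustrative compile-time check, not part of the original sources: for an
 * 802.1Q TCI of 0xA07B the accessors above give priority 5, CFI 0 and
 * VLAN ID 0x07B.
 */
AssertCompile(E1K_SPEC_PRI(0xA07B)  == 5);
AssertCompile(E1K_SPEC_CFI(0xA07B)  == 0);
AssertCompile(E1K_SPEC_VLAN(0xA07B) == 0x07B);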
691struct E1kRxDStatus
692{
693 /** @name Descriptor Status field (3.2.3.1)
694 * @{ */
695 unsigned fDD : 1; /**< Descriptor Done. */
696 unsigned fEOP : 1; /**< End of packet. */
697 unsigned fIXSM : 1; /**< Ignore checksum indication. */
698 unsigned fVP : 1; /**< VLAN, matches VET. */
699 unsigned : 1;
700 unsigned fTCPCS : 1; /**< TCP checksum calculated on the packet. */
701 unsigned fIPCS : 1; /**< IP Checksum calculated on the packet. */
702 unsigned fPIF : 1; /**< Passed in-exact filter */
703 /** @} */
704 /** @name Descriptor Errors field (3.2.3.2)
705 * (Only valid when fEOP and fDD are set.)
706 * @{ */
707 unsigned fCE : 1; /**< CRC or alignment error. */
708 unsigned : 4; /**< Reserved, varies with different models... */
709 unsigned fTCPE : 1; /**< TCP/UDP checksum error. */
710 unsigned fIPE : 1; /**< IP Checksum error. */
711 unsigned fRXE : 1; /**< RX Data error. */
712 /** @} */
713 /** @name Descriptor Special field (3.2.3.3)
714 * @{ */
715 unsigned u16Special : 16; /**< VLAN: Id, Canonical form, Priority. */
716 /** @} */
717};
718typedef struct E1kRxDStatus E1KRXDST;
719
720struct E1kRxDesc_st
721{
722 uint64_t u64BufAddr; /**< Address of data buffer */
723 uint16_t u16Length; /**< Length of data in buffer */
724 uint16_t u16Checksum; /**< Packet checksum */
725 E1KRXDST status;
726};
727typedef struct E1kRxDesc_st E1KRXDESC;
728AssertCompileSize(E1KRXDESC, 16);
729
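/*
 * Illustrative sketch, not part of the original sources: judging by the
 * field descriptions above, writing back an error-free, fully received
 * frame into an RX descriptor boils down to something like this.
 */
DECLINLINE(void) e1kDemoCompleteRxDesc(E1KRXDESC *pDesc, uint16_t cbStored)
{
    pDesc->u16Length   = cbStored; /* bytes placed into the guest buffer */
    pDesc->status.fDD  = 1;        /* descriptor done */
    pDesc->status.fEOP = 1;        /* last (here: only) descriptor of the packet */
}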
730#define E1K_DTYP_LEGACY -1
731#define E1K_DTYP_CONTEXT 0
732#define E1K_DTYP_DATA 1
733
734struct E1kTDLegacy
735{
736 uint64_t u64BufAddr; /**< Address of data buffer */
737 struct TDLCmd_st
738 {
739 unsigned u16Length : 16;
740 unsigned u8CSO : 8;
741 /* CMD field : 8 */
742 unsigned fEOP : 1;
743 unsigned fIFCS : 1;
744 unsigned fIC : 1;
745 unsigned fRS : 1;
746 unsigned fRPS : 1;
747 unsigned fDEXT : 1;
748 unsigned fVLE : 1;
749 unsigned fIDE : 1;
750 } cmd;
751 struct TDLDw3_st
752 {
753 /* STA field */
754 unsigned fDD : 1;
755 unsigned fEC : 1;
756 unsigned fLC : 1;
757 unsigned fTURSV : 1;
758 /* RSV field */
759 unsigned u4RSV : 4;
760 /* CSS field */
761 unsigned u8CSS : 8;
762 /* Special field*/
763 unsigned u16Special: 16;
764 } dw3;
765};
766
767/**
768 * TCP/IP Context Transmit Descriptor, section 3.3.6.
769 */
770struct E1kTDContext
771{
772 struct CheckSum_st
773 {
774 /** TSE: Header start. !TSE: Checksum start. */
775 unsigned u8CSS : 8;
776 /** Checksum offset - where to store it. */
777 unsigned u8CSO : 8;
778 /** Checksum ending (inclusive) offset, 0 = end of packet. */
779 unsigned u16CSE : 16;
780 } ip;
781 struct CheckSum_st tu;
782 struct TDCDw2_st
783 {
784 /** TSE: The total number of payload bytes for this context. Sans header. */
785 unsigned u20PAYLEN : 20;
786 /** The descriptor type - E1K_DTYP_CONTEXT (0). */
787 unsigned u4DTYP : 4;
788 /** TUCMD field, 8 bits
789 * @{ */
790 /** TSE: TCP (set) or UDP (clear). */
791 unsigned fTCP : 1;
792 /** TSE: IPv4 (set) or IPv6 (clear) - for finding the payload length field in
793 * the IP header. Does not affect the checksumming.
794 * @remarks 82544GC/EI interprets a cleared field differently. */
795 unsigned fIP : 1;
796 /** TSE: TCP segmentation enable. When clear the context only describes checksum offloading. */
797 unsigned fTSE : 1;
798 /** Report status (only applies to dw3.fDD here). */
799 unsigned fRS : 1;
800 /** Reserved, MBZ. */
801 unsigned fRSV1 : 1;
802 /** Descriptor extension, must be set for this descriptor type. */
803 unsigned fDEXT : 1;
804 /** Reserved, MBZ. */
805 unsigned fRSV2 : 1;
806 /** Interrupt delay enable. */
807 unsigned fIDE : 1;
808 /** @} */
809 } dw2;
810 struct TDCDw3_st
811 {
812 /** Descriptor Done. */
813 unsigned fDD : 1;
814 /** Reserved, MBZ. */
815 unsigned u7RSV : 7;
816 /** TSO: The header (prototype) length (Ethernet[, VLAN tag], IP, TCP/UDP). */
817 unsigned u8HDRLEN : 8;
818 /** TSO: Maximum segment size. */
819 unsigned u16MSS : 16;
820 } dw3;
821};
822typedef struct E1kTDContext E1KTXCTX;
823
824/**
825 * TCP/IP Data Transmit Descriptor, section 3.3.7.
826 */
827struct E1kTDData
828{
829 uint64_t u64BufAddr; /**< Address of data buffer */
830 struct TDDCmd_st
831 {
832 /** The total length of data pointed to by this descriptor. */
833 unsigned u20DTALEN : 20;
834 /** The descriptor type - E1K_DTYP_DATA (1). */
835 unsigned u4DTYP : 4;
836 /** @name DCMD field, 8 bits (3.3.7.1).
837 * @{ */
838 /** End of packet. Note TSCTFC update. */
839 unsigned fEOP : 1;
840 /** Insert Ethernet FCS/CRC (requires fEOP to be set). */
841 unsigned fIFCS : 1;
842 /** Use the TSE context when set and the normal when clear. */
843 unsigned fTSE : 1;
844 /** Report status (dw3.STA). */
845 unsigned fRS : 1;
846 /** Reserved. 82544GC/EI defines this as Report Packet Sent (RPS). */
847 unsigned fRPS : 1;
848 /** Descriptor extension, must be set for this descriptor type. */
849 unsigned fDEXT : 1;
850 /** VLAN enable, requires CTRL.VME, auto enables FCS/CRC.
851 * Insert dw3.SPECIAL after ethernet header. */
852 unsigned fVLE : 1;
853 /** Interrupt delay enable. */
854 unsigned fIDE : 1;
855 /** @} */
856 } cmd;
857 struct TDDDw3_st
858 {
859 /** @name STA field (3.3.7.2)
860 * @{ */
861 unsigned fDD : 1; /**< Descriptor done. */
862 unsigned fEC : 1; /**< Excess collision. */
863 unsigned fLC : 1; /**< Late collision. */
864 /** Reserved, except for the usual oddball (82544GC/EI) where it's called TU. */
865 unsigned fTURSV : 1;
866 /** @} */
867 unsigned u4RSV : 4; /**< Reserved field, MBZ. */
868 /** @name POPTS (Packet Option) field (3.3.7.3)
869 * @{ */
870 unsigned fIXSM : 1; /**< Insert IP checksum. */
871 unsigned fTXSM : 1; /**< Insert TCP/UDP checksum. */
872 unsigned u6RSV : 6; /**< Reserved, MBZ. */
873 /** @} */
874 /** @name SPECIAL field - VLAN tag to be inserted after ethernet header.
875 * Requires fEOP, fVLE and CTRL.VME to be set.
876 * @{ */
877 unsigned u16Special: 16; /**< VLAN: Id, Canonical form, Priority. */
878 /** @} */
879 } dw3;
880};
881typedef struct E1kTDData E1KTXDAT;
882
883union E1kTxDesc
884{
885 struct E1kTDLegacy legacy;
886 struct E1kTDContext context;
887 struct E1kTDData data;
888};
889typedef union E1kTxDesc E1KTXDESC;
890AssertCompileSize(E1KTXDESC, 16);
891
892#define RA_CTL_AS 0x0003
893#define RA_CTL_AV 0x8000
894
895union E1kRecAddr
896{
897 uint32_t au32[32];
898 struct RAArray
899 {
900 uint8_t addr[6];
901 uint16_t ctl;
902 } array[16];
903};
904typedef struct E1kRecAddr::RAArray E1KRAELEM;
905typedef union E1kRecAddr E1KRA;
906AssertCompileSize(E1KRA, 8*16);
907
908#define E1K_IP_RF 0x8000 /* reserved fragment flag */
909#define E1K_IP_DF 0x4000 /* don't fragment flag */
910#define E1K_IP_MF 0x2000 /* more fragments flag */
911#define E1K_IP_OFFMASK 0x1fff /* mask for fragmenting bits */
912
913/** @todo use+extend RTNETIPV4 */
914struct E1kIpHeader
915{
916 /* type of service / version / header length */
917 uint16_t tos_ver_hl;
918 /* total length */
919 uint16_t total_len;
920 /* identification */
921 uint16_t ident;
922 /* fragment offset field */
923 uint16_t offset;
924 /* time to live / protocol */
925 uint16_t ttl_proto;
926 /* checksum */
927 uint16_t chksum;
928 /* source IP address */
929 uint32_t src;
930 /* destination IP address */
931 uint32_t dest;
932};
933AssertCompileSize(struct E1kIpHeader, 20);
934
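/*
 * Illustrative helper, not part of the original sources: the offset word of
 * E1kIpHeader arrives in network byte order and mixes the flags above with
 * the 13-bit fragment offset, which is counted in 8-byte units.
 */
DECLINLINE(uint32_t) e1kDemoIpFragOffsetBytes(const struct E1kIpHeader *pHdr)
{
    uint16_t u16Off = ntohs(pHdr->offset);          /* flags + fragment offset */
    return (uint32_t)(u16Off & E1K_IP_OFFMASK) * 8; /* convert to a byte offset */
}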
935#define E1K_TCP_FIN 0x01U
936#define E1K_TCP_SYN 0x02U
937#define E1K_TCP_RST 0x04U
938#define E1K_TCP_PSH 0x08U
939#define E1K_TCP_ACK 0x10U
940#define E1K_TCP_URG 0x20U
941#define E1K_TCP_ECE 0x40U
942#define E1K_TCP_CWR 0x80U
943
944#define E1K_TCP_FLAGS 0x3fU
945
946/** @todo use+extend RTNETTCP */
947struct E1kTcpHeader
948{
949 uint16_t src;
950 uint16_t dest;
951 uint32_t seqno;
952 uint32_t ackno;
953 uint16_t hdrlen_flags;
954 uint16_t wnd;
955 uint16_t chksum;
956 uint16_t urgp;
957};
958AssertCompileSize(struct E1kTcpHeader, 20);
959
960
961#ifdef E1K_WITH_TXD_CACHE
962/** The current Saved state version. */
963#define E1K_SAVEDSTATE_VERSION 4
964/** Saved state version for VirtualBox 4.2 with VLAN tag fields. */
965#define E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG 3
966#else /* !E1K_WITH_TXD_CACHE */
967/** The current Saved state version. */
968#define E1K_SAVEDSTATE_VERSION 3
969#endif /* !E1K_WITH_TXD_CACHE */
970/** Saved state version for VirtualBox 4.1 and earlier.
971 * These did not include VLAN tag fields. */
972#define E1K_SAVEDSTATE_VERSION_VBOX_41 2
973/** Saved state version for VirtualBox 3.0 and earlier.
974 * This did not include the configuration part nor the E1kEEPROM. */
975#define E1K_SAVEDSTATE_VERSION_VBOX_30 1
976
977/**
978 * Device state structure. Holds the current state of the device.
979 *
980 * @implements PDMINETWORKDOWN
981 * @implements PDMINETWORKCONFIG
982 * @implements PDMILEDPORTS
983 */
984struct E1kState_st
985{
986 char szInstance[8]; /**< Instance name, e.g. E1000#1. */
987 PDMIBASE IBase;
988 PDMINETWORKDOWN INetworkDown;
989 PDMINETWORKCONFIG INetworkConfig;
990 PDMILEDPORTS ILeds; /**< LED interface */
991 R3PTRTYPE(PPDMIBASE) pDrvBase; /**< Attached network driver. */
992 R3PTRTYPE(PPDMILEDCONNECTORS) pLedsConnector;
993
994 PPDMDEVINSR3 pDevInsR3; /**< Device instance - R3. */
995 R3PTRTYPE(PPDMQUEUE) pTxQueueR3; /**< Transmit queue - R3. */
996 R3PTRTYPE(PPDMQUEUE) pCanRxQueueR3; /**< Rx wakeup signaller - R3. */
997 PPDMINETWORKUPR3 pDrvR3; /**< Attached network driver - R3. */
998 PTMTIMERR3 pRIDTimerR3; /**< Receive Interrupt Delay Timer - R3. */
999 PTMTIMERR3 pRADTimerR3; /**< Receive Absolute Delay Timer - R3. */
1000 PTMTIMERR3 pTIDTimerR3; /**< Transmit Interrupt Delay Timer - R3. */
1001 PTMTIMERR3 pTADTimerR3; /**< Transmit Absolute Delay Timer - R3. */
1002 PTMTIMERR3 pIntTimerR3; /**< Late Interrupt Timer - R3. */
1003 PTMTIMERR3 pLUTimerR3; /**< Link Up(/Restore) Timer. */
1004 /** The scatter / gather buffer used for the current outgoing packet - R3. */
1005 R3PTRTYPE(PPDMSCATTERGATHER) pTxSgR3;
1006
1007 PPDMDEVINSR0 pDevInsR0; /**< Device instance - R0. */
1008 R0PTRTYPE(PPDMQUEUE) pTxQueueR0; /**< Transmit queue - R0. */
1009 R0PTRTYPE(PPDMQUEUE) pCanRxQueueR0; /**< Rx wakeup signaller - R0. */
1010 PPDMINETWORKUPR0 pDrvR0; /**< Attached network driver - R0. */
1011 PTMTIMERR0 pRIDTimerR0; /**< Receive Interrupt Delay Timer - R0. */
1012 PTMTIMERR0 pRADTimerR0; /**< Receive Absolute Delay Timer - R0. */
1013 PTMTIMERR0 pTIDTimerR0; /**< Transmit Interrupt Delay Timer - R0. */
1014 PTMTIMERR0 pTADTimerR0; /**< Transmit Absolute Delay Timer - R0. */
1015 PTMTIMERR0 pIntTimerR0; /**< Late Interrupt Timer - R0. */
1016 PTMTIMERR0 pLUTimerR0; /**< Link Up(/Restore) Timer - R0. */
1017 /** The scatter / gather buffer used for the current outgoing packet - R0. */
1018 R0PTRTYPE(PPDMSCATTERGATHER) pTxSgR0;
1019
1020 PPDMDEVINSRC pDevInsRC; /**< Device instance - RC. */
1021 RCPTRTYPE(PPDMQUEUE) pTxQueueRC; /**< Transmit queue - RC. */
1022 RCPTRTYPE(PPDMQUEUE) pCanRxQueueRC; /**< Rx wakeup signaller - RC. */
1023 PPDMINETWORKUPRC pDrvRC; /**< Attached network driver - RC. */
1024 PTMTIMERRC pRIDTimerRC; /**< Receive Interrupt Delay Timer - RC. */
1025 PTMTIMERRC pRADTimerRC; /**< Receive Absolute Delay Timer - RC. */
1026 PTMTIMERRC pTIDTimerRC; /**< Transmit Interrupt Delay Timer - RC. */
1027 PTMTIMERRC pTADTimerRC; /**< Transmit Absolute Delay Timer - RC. */
1028 PTMTIMERRC pIntTimerRC; /**< Late Interrupt Timer - RC. */
1029 PTMTIMERRC pLUTimerRC; /**< Link Up(/Restore) Timer - RC. */
1030 /** The scatter / gather buffer used for the current outgoing packet - RC. */
1031 RCPTRTYPE(PPDMSCATTERGATHER) pTxSgRC;
1032 RTRCPTR RCPtrAlignment;
1033
1034#if HC_ARCH_BITS == 32
1035 uint32_t Alignment1;
1036#endif
1037 PDMCRITSECT cs; /**< Critical section - what is it protecting? */
1038 PDMCRITSECT csRx; /**< RX Critical section. */
1039#ifdef E1K_WITH_TX_CS
1040 PDMCRITSECT csTx; /**< TX Critical section. */
1041#endif /* E1K_WITH_TX_CS */
1042 /** Base address of memory-mapped registers. */
1043 RTGCPHYS addrMMReg;
1044 /** MAC address obtained from the configuration. */
1045 RTMAC macConfigured;
1046 /** Base port of I/O space region. */
1047 RTIOPORT addrIOPort;
1048 /** EMT: */
1049 PCIDEVICE pciDevice;
1050 /** EMT: Last time the interrupt was acknowledged. */
1051 uint64_t u64AckedAt;
1052 /** All: Used for eliminating spurious interrupts. */
1053 bool fIntRaised;
1054 /** EMT: false if the cable is disconnected by the GUI. */
1055 bool fCableConnected;
1056 /** EMT: */
1057 bool fR0Enabled;
1058 /** EMT: */
1059 bool fGCEnabled;
1060 /** EMT: Compute Ethernet CRC for RX packets. */
1061 bool fEthernetCRC;
1062
1063 bool Alignment2[3];
1064 /** Link up delay (in milliseconds). */
1065 uint32_t cMsLinkUpDelay;
1066
1067 /** All: Device register storage. */
1068 uint32_t auRegs[E1K_NUM_OF_32BIT_REGS];
1069 /** TX/RX: Status LED. */
1070 PDMLED led;
1071 /** TX/RX: Number of the packet being sent/received, shown in the debug log. */
1072 uint32_t u32PktNo;
1073
1074 /** EMT: Offset of the register to be read via IO. */
1075 uint32_t uSelectedReg;
1076 /** EMT: Multicast Table Array. */
1077 uint32_t auMTA[128];
1078 /** EMT: Receive Address registers. */
1079 E1KRA aRecAddr;
1080 /** EMT: VLAN filter table array. */
1081 uint32_t auVFTA[128];
1082 /** EMT: Receive buffer size. */
1083 uint16_t u16RxBSize;
1084 /** EMT: Locked state -- no state alteration possible. */
1085 bool fLocked;
1086 /** EMT: */
1087 bool fDelayInts;
1088 /** All: */
1089 bool fIntMaskUsed;
1090
1091 /** N/A: */
1092 bool volatile fMaybeOutOfSpace;
1093 /** EMT: Gets signalled when more RX descriptors become available. */
1094 RTSEMEVENT hEventMoreRxDescAvail;
1095#ifdef E1K_WITH_RXD_CACHE
1096 /** RX: Fetched RX descriptors. */
1097 E1KRXDESC aRxDescriptors[E1K_RXD_CACHE_SIZE];
1098 /** RX: Actual number of fetched RX descriptors. */
1099 uint32_t nRxDFetched;
1100 /** RX: Index in cache of RX descriptor being processed. */
1101 uint32_t iRxDCurrent;
1102#endif /* E1K_WITH_RXD_CACHE */
1103
1104 /** TX: Context used for TCP segmentation packets. */
1105 E1KTXCTX contextTSE;
1106 /** TX: Context used for ordinary packets. */
1107 E1KTXCTX contextNormal;
1108#ifdef E1K_WITH_TXD_CACHE
1109 /** TX: Fetched TX descriptors. */
1110 E1KTXDESC aTxDescriptors[E1K_TXD_CACHE_SIZE];
1111 /** TX: Actual number of fetched TX descriptors. */
1112 uint8_t nTxDFetched;
1113 /** TX: Index in cache of TX descriptor being processed. */
1114 uint8_t iTxDCurrent;
1115 /** TX: Will this frame be sent as GSO. */
1116 bool fGSO;
1117 /** TX: Number of bytes in next packet. */
1118 uint32_t cbTxAlloc;
1119
1120#endif /* E1K_WITH_TXD_CACHE */
1121 /** GSO context. u8Type is set to PDMNETWORKGSOTYPE_INVALID when not
1122 * applicable to the current TSE mode. */
1123 PDMNETWORKGSO GsoCtx;
1124 /** Scratch space for holding the loopback / fallback scatter / gather
1125 * descriptor. */
1126 union
1127 {
1128 PDMSCATTERGATHER Sg;
1129 uint8_t padding[8 * sizeof(RTUINTPTR)];
1130 } uTxFallback;
1131 /** TX: Transmit packet buffer used for TSE fallback and loopback. */
1132 uint8_t aTxPacketFallback[E1K_MAX_TX_PKT_SIZE];
1133 /** TX: Number of bytes assembled in TX packet buffer. */
1134 uint16_t u16TxPktLen;
1135 /** TX: IP checksum has to be inserted if true. */
1136 bool fIPcsum;
1137 /** TX: TCP/UDP checksum has to be inserted if true. */
1138 bool fTCPcsum;
1139 /** TX: VLAN tag has to be inserted if true. */
1140 bool fVTag;
1141 /** TX: TCI part of VLAN tag to be inserted. */
1142 uint16_t u16VTagTCI;
1143 /** TX TSE fallback: Number of payload bytes remaining in TSE context. */
1144 uint32_t u32PayRemain;
1145 /** TX TSE fallback: Number of header bytes remaining in TSE context. */
1146 uint16_t u16HdrRemain;
1147 /** TX TSE fallback: Flags from template header. */
1148 uint16_t u16SavedFlags;
1149 /** TX TSE fallback: Partial checksum from template header. */
1150 uint32_t u32SavedCsum;
1151 /** ?: Emulated controller type. */
1152 E1KCHIP eChip;
1153
1154 /** EMT: EEPROM emulation */
1155 E1kEEPROM eeprom;
1156 /** EMT: Physical interface emulation. */
1157 PHY phy;
1158
1159#if 0
1160 /** Alignment padding. */
1161 uint8_t Alignment[HC_ARCH_BITS == 64 ? 8 : 4];
1162#endif
1163
1164 STAMCOUNTER StatReceiveBytes;
1165 STAMCOUNTER StatTransmitBytes;
1166#if defined(VBOX_WITH_STATISTICS)
1167 STAMPROFILEADV StatMMIOReadRZ;
1168 STAMPROFILEADV StatMMIOReadR3;
1169 STAMPROFILEADV StatMMIOWriteRZ;
1170 STAMPROFILEADV StatMMIOWriteR3;
1171 STAMPROFILEADV StatEEPROMRead;
1172 STAMPROFILEADV StatEEPROMWrite;
1173 STAMPROFILEADV StatIOReadRZ;
1174 STAMPROFILEADV StatIOReadR3;
1175 STAMPROFILEADV StatIOWriteRZ;
1176 STAMPROFILEADV StatIOWriteR3;
1177 STAMPROFILEADV StatLateIntTimer;
1178 STAMCOUNTER StatLateInts;
1179 STAMCOUNTER StatIntsRaised;
1180 STAMCOUNTER StatIntsPrevented;
1181 STAMPROFILEADV StatReceive;
1182 STAMPROFILEADV StatReceiveCRC;
1183 STAMPROFILEADV StatReceiveFilter;
1184 STAMPROFILEADV StatReceiveStore;
1185 STAMPROFILEADV StatTransmitRZ;
1186 STAMPROFILEADV StatTransmitR3;
1187 STAMPROFILE StatTransmitSendRZ;
1188 STAMPROFILE StatTransmitSendR3;
1189 STAMPROFILE StatRxOverflow;
1190 STAMCOUNTER StatRxOverflowWakeup;
1191 STAMCOUNTER StatTxDescCtxNormal;
1192 STAMCOUNTER StatTxDescCtxTSE;
1193 STAMCOUNTER StatTxDescLegacy;
1194 STAMCOUNTER StatTxDescData;
1195 STAMCOUNTER StatTxDescTSEData;
1196 STAMCOUNTER StatTxPathFallback;
1197 STAMCOUNTER StatTxPathGSO;
1198 STAMCOUNTER StatTxPathRegular;
1199 STAMCOUNTER StatPHYAccesses;
1200
1201#endif /* VBOX_WITH_STATISTICS */
1202
1203#ifdef E1K_INT_STATS
1204 /* Internal stats */
1205 uint32_t uStatInt;
1206 uint32_t uStatIntTry;
1207 int32_t uStatIntLower;
1208 uint32_t uStatIntDly;
1209 int32_t iStatIntLost;
1210 int32_t iStatIntLostOne;
1211 uint32_t uStatDisDly;
1212 uint32_t uStatIntSkip;
1213 uint32_t uStatIntLate;
1214 uint32_t uStatIntMasked;
1215 uint32_t uStatIntEarly;
1216 uint32_t uStatIntRx;
1217 uint32_t uStatIntTx;
1218 uint32_t uStatIntICS;
1219 uint32_t uStatIntRDTR;
1220 uint32_t uStatIntRXDMT0;
1221 uint32_t uStatIntTXQE;
1222 uint32_t uStatTxNoRS;
1223 uint32_t uStatTxIDE;
1224 uint32_t uStatTAD;
1225 uint32_t uStatTID;
1226 uint32_t uStatRAD;
1227 uint32_t uStatRID;
1228 uint32_t uStatRxFrm;
1229 uint32_t uStatTxFrm;
1230 uint32_t uStatDescCtx;
1231 uint32_t uStatDescDat;
1232 uint32_t uStatDescLeg;
1233#endif /* E1K_INT_STATS */
1234};
1235typedef struct E1kState_st E1KSTATE;
1236
1237#ifndef VBOX_DEVICE_STRUCT_TESTCASE
1238
1239/* Forward declarations ******************************************************/
1240static int e1kXmitPending(E1KSTATE *pState, bool fOnWorkerThread);
1241
1242static int e1kRegReadUnimplemented (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1243static int e1kRegWriteUnimplemented(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1244static int e1kRegReadAutoClear (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1245static int e1kRegReadDefault (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1246static int e1kRegWriteDefault (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1247#if 0 /* unused */
1248static int e1kRegReadCTRL (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1249#endif
1250static int e1kRegWriteCTRL (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1251static int e1kRegReadEECD (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1252static int e1kRegWriteEECD (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1253static int e1kRegWriteEERD (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1254static int e1kRegWriteMDIC (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1255static int e1kRegReadICR (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1256static int e1kRegWriteICR (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1257static int e1kRegWriteICS (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1258static int e1kRegWriteIMS (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1259static int e1kRegWriteIMC (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1260static int e1kRegWriteRCTL (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1261static int e1kRegWritePBA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1262static int e1kRegWriteRDT (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1263static int e1kRegWriteRDTR (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1264static int e1kRegWriteTDT (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1265static int e1kRegReadMTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1266static int e1kRegWriteMTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1267static int e1kRegReadRA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1268static int e1kRegWriteRA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1269static int e1kRegReadVFTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1270static int e1kRegWriteVFTA (E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1271
1272/**
1273 * Register map table.
1274 *
1275 * Override pfnRead and pfnWrite to get register-specific behavior.
1276 */
1277const static struct E1kRegMap_st
1278{
1279 /** Register offset in the register space. */
1280 uint32_t offset;
1281 /** Size in bytes. Registers of size > 4 are in fact tables. */
1282 uint32_t size;
1283 /** Readable bits. */
1284 uint32_t readable;
1285 /** Writable bits. */
1286 uint32_t writable;
1287 /** Read callback. */
1288 int (*pfnRead)(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value);
1289 /** Write callback. */
1290 int (*pfnWrite)(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t u32Value);
1291 /** Abbreviated name. */
1292 const char *abbrev;
1293 /** Full name. */
1294 const char *name;
1295} s_e1kRegMap[E1K_NUM_OF_REGS] =
1296{
1297 /* offset size read mask write mask read callback write callback abbrev full name */
1298 /*------- ------- ---------- ---------- ----------------------- ------------------------ ---------- ------------------------------*/
1299 { 0x00000, 0x00004, 0xDBF31BE9, 0xDBF31BE9, e1kRegReadDefault , e1kRegWriteCTRL , "CTRL" , "Device Control" },
1300 { 0x00008, 0x00004, 0x0000FDFF, 0x00000000, e1kRegReadDefault , e1kRegWriteUnimplemented, "STATUS" , "Device Status" },
1301 { 0x00010, 0x00004, 0x000027F0, 0x00000070, e1kRegReadEECD , e1kRegWriteEECD , "EECD" , "EEPROM/Flash Control/Data" },
1302 { 0x00014, 0x00004, 0xFFFFFF10, 0xFFFFFF00, e1kRegReadDefault , e1kRegWriteEERD , "EERD" , "EEPROM Read" },
1303 { 0x00018, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CTRL_EXT", "Extended Device Control" },
1304 { 0x0001c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FLA" , "Flash Access (N/A)" },
1305 { 0x00020, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteMDIC , "MDIC" , "MDI Control" },
1306 { 0x00028, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAL" , "Flow Control Address Low" },
1307 { 0x0002c, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCAH" , "Flow Control Address High" },
1308 { 0x00030, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCT" , "Flow Control Type" },
1309 { 0x00038, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "VET" , "VLAN EtherType" },
1310 { 0x000c0, 0x00004, 0x0001F6DF, 0x0001F6DF, e1kRegReadICR , e1kRegWriteICR , "ICR" , "Interrupt Cause Read" },
1311 { 0x000c4, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "ITR" , "Interrupt Throttling" },
1312 { 0x000c8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteICS , "ICS" , "Interrupt Cause Set" },
1313 { 0x000d0, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteIMS , "IMS" , "Interrupt Mask Set/Read" },
1314 { 0x000d8, 0x00004, 0x00000000, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteIMC , "IMC" , "Interrupt Mask Clear" },
1315 { 0x00100, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRCTL , "RCTL" , "Receive Control" },
1316 { 0x00170, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCTTV" , "Flow Control Transmit Timer Value" },
1317 { 0x00178, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXCW" , "Transmit Configuration Word (N/A)" },
1318 { 0x00180, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXCW" , "Receive Configuration Word (N/A)" },
1319 { 0x00400, 0x00004, 0x017FFFFA, 0x017FFFFA, e1kRegReadDefault , e1kRegWriteDefault , "TCTL" , "Transmit Control" },
1320 { 0x00410, 0x00004, 0x3FFFFFFF, 0x3FFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIPG" , "Transmit IPG" },
1321 { 0x00458, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "AIFS" , "Adaptive IFS Throttle - AIT" },
1322 { 0x00e00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LEDCTL" , "LED Control" },
1323 { 0x01000, 0x00004, 0xFFFF007F, 0x0000007F, e1kRegReadDefault , e1kRegWritePBA , "PBA" , "Packet Buffer Allocation" },
1324 { 0x02160, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTL" , "Flow Control Receive Threshold Low" },
1325 { 0x02168, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRTH" , "Flow Control Receive Threshold High" },
1326 { 0x02410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFH" , "Receive Data FIFO Head" },
1327 { 0x02418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFT" , "Receive Data FIFO Tail" },
1328 { 0x02420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFHS" , "Receive Data FIFO Head Saved Register" },
1329 { 0x02428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFTS" , "Receive Data FIFO Tail Saved Register" },
1330 { 0x02430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RDFPC" , "Receive Data FIFO Packet Count" },
1331 { 0x02800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAL" , "Receive Descriptor Base Low" },
1332 { 0x02804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDBAH" , "Receive Descriptor Base High" },
1333 { 0x02808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDLEN" , "Receive Descriptor Length" },
1334 { 0x02810, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "RDH" , "Receive Descriptor Head" },
1335 { 0x02818, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteRDT , "RDT" , "Receive Descriptor Tail" },
1336 { 0x02820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteRDTR , "RDTR" , "Receive Delay Timer" },
1337 { 0x02828, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXDCTL" , "Receive Descriptor Control" },
1338 { 0x0282c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "RADV" , "Receive Interrupt Absolute Delay Timer" },
1339 { 0x02c00, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RSRPD" , "Receive Small Packet Detect Interrupt" },
1340 { 0x03000, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TXDMAC" , "TX DMA Control (N/A)" },
1341 { 0x03410, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFH" , "Transmit Data FIFO Head" },
1342 { 0x03418, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFT" , "Transmit Data FIFO Tail" },
1343 { 0x03420, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFHS" , "Transmit Data FIFO Head Saved Register" },
1344 { 0x03428, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFTS" , "Transmit Data FIFO Tail Saved Register" },
1345 { 0x03430, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TDFPC" , "Transmit Data FIFO Packet Count" },
1346 { 0x03800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAL" , "Transmit Descriptor Base Low" },
1347 { 0x03804, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDBAH" , "Transmit Descriptor Base High" },
1348 { 0x03808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDLEN" , "Transmit Descriptor Length" },
1349 { 0x03810, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TDH" , "Transmit Descriptor Head" },
1350 { 0x03818, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteTDT , "TDT" , "Transmit Descriptor Tail" },
1351 { 0x03820, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TIDV" , "Transmit Interrupt Delay Value" },
1352 { 0x03828, 0x00004, 0xFF3F3F3F, 0xFF3F3F3F, e1kRegReadDefault , e1kRegWriteDefault , "TXDCTL" , "Transmit Descriptor Control" },
1353 { 0x0382c, 0x00004, 0x0000FFFF, 0x0000FFFF, e1kRegReadDefault , e1kRegWriteDefault , "TADV" , "Transmit Absolute Interrupt Delay Timer" },
1354 { 0x03830, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "TSPMT" , "TCP Segmentation Pad and Threshold" },
1355 { 0x04000, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CRCERRS" , "CRC Error Count" },
1356 { 0x04004, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ALGNERRC", "Alignment Error Count" },
1357 { 0x04008, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SYMERRS" , "Symbol Error Count" },
1358 { 0x0400c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RXERRC" , "RX Error Count" },
1359 { 0x04010, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MPC" , "Missed Packets Count" },
1360 { 0x04014, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SCC" , "Single Collision Count" },
1361 { 0x04018, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "ECOL" , "Excessive Collisions Count" },
1362 { 0x0401c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MCC" , "Multiple Collision Count" },
1363 { 0x04020, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "LATECOL" , "Late Collisions Count" },
1364 { 0x04028, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "COLC" , "Collision Count" },
1365 { 0x04030, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "DC" , "Defer Count" },
1366 { 0x04034, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "TNCRS" , "Transmit - No CRS" },
1367 { 0x04038, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "SEC" , "Sequence Error Count" },
1368 { 0x0403c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "CEXTERR" , "Carrier Extension Error Count" },
1369 { 0x04040, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RLEC" , "Receive Length Error Count" },
1370 { 0x04048, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONRXC" , "XON Received Count" },
1371 { 0x0404c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XONTXC" , "XON Transmitted Count" },
1372 { 0x04050, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFRXC" , "XOFF Received Count" },
1373 { 0x04054, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "XOFFTXC" , "XOFF Transmitted Count" },
1374 { 0x04058, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FCRUC" , "FC Received Unsupported Count" },
1375 { 0x0405c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC64" , "Packets Received (64 Bytes) Count" },
1376 { 0x04060, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC127" , "Packets Received (65-127 Bytes) Count" },
1377 { 0x04064, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC255" , "Packets Received (128-255 Bytes) Count" },
1378 { 0x04068, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC511" , "Packets Received (256-511 Bytes) Count" },
1379 { 0x0406c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1023" , "Packets Received (512-1023 Bytes) Count" },
1380 { 0x04070, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PRC1522" , "Packets Received (1024-Max Bytes)" },
1381 { 0x04074, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPRC" , "Good Packets Received Count" },
1382 { 0x04078, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPRC" , "Broadcast Packets Received Count" },
1383 { 0x0407c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPRC" , "Multicast Packets Received Count" },
1384 { 0x04080, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GPTC" , "Good Packets Transmitted Count" },
1385 { 0x04088, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCL" , "Good Octets Received Count (Low)" },
1386 { 0x0408c, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GORCH" , "Good Octets Received Count (Hi)" },
1387 { 0x04090, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCL" , "Good Octets Transmitted Count (Low)" },
1388 { 0x04094, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "GOTCH" , "Good Octets Transmitted Count (Hi)" },
1389 { 0x040a0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RNBC" , "Receive No Buffers Count" },
1390 { 0x040a4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RUC" , "Receive Undersize Count" },
1391 { 0x040a8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RFC" , "Receive Fragment Count" },
1392 { 0x040ac, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "ROC" , "Receive Oversize Count" },
1393 { 0x040b0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "RJC" , "Receive Jabber Count" },
1394 { 0x040b4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPRC" , "Management Packets Received Count" },
1395 { 0x040b8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPDC" , "Management Packets Dropped Count" },
1396 { 0x040bc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "MGTPTC" , "Management Pkts Transmitted Count" },
1397 { 0x040c0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORL" , "Total Octets Received (Lo)" },
1398 { 0x040c4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TORH" , "Total Octets Received (Hi)" },
1399 { 0x040c8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTL" , "Total Octets Transmitted (Lo)" },
1400 { 0x040cc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TOTH" , "Total Octets Transmitted (Hi)" },
1401 { 0x040d0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPR" , "Total Packets Received" },
1402 { 0x040d4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TPT" , "Total Packets Transmitted" },
1403 { 0x040d8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC64" , "Packets Transmitted (64 Bytes) Count" },
1404 { 0x040dc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC127" , "Packets Transmitted (65-127 Bytes) Count" },
1405 { 0x040e0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC255" , "Packets Transmitted (128-255 Bytes) Count" },
1406 { 0x040e4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC511" , "Packets Transmitted (256-511 Bytes) Count" },
1407 { 0x040e8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1023" , "Packets Transmitted (512-1023 Bytes) Count" },
1408 { 0x040ec, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "PTC1522" , "Packets Transmitted (1024 Bytes or Greater) Count" },
1409 { 0x040f0, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "MPTC" , "Multicast Packets Transmitted Count" },
1410 { 0x040f4, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "BPTC" , "Broadcast Packets Transmitted Count" },
1411 { 0x040f8, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTC" , "TCP Segmentation Context Transmitted Count" },
1412 { 0x040fc, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadAutoClear , e1kRegWriteUnimplemented, "TSCTFC" , "TCP Segmentation Context Tx Fail Count" },
1413 { 0x05000, 0x00004, 0x000007FF, 0x000007FF, e1kRegReadDefault , e1kRegWriteDefault , "RXCSUM" , "Receive Checksum Control" },
1414 { 0x05800, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUC" , "Wakeup Control" },
1415 { 0x05808, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUFC" , "Wakeup Filter Control" },
1416 { 0x05810, 0x00004, 0xFFFFFFFF, 0x00000000, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUS" , "Wakeup Status" },
1417 { 0x05820, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadDefault , e1kRegWriteDefault , "MANC" , "Management Control" },
1418 { 0x05838, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IPAV" , "IP Address Valid" },
1419 { 0x05900, 0x00004, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPL" , "Wakeup Packet Length" },
1420 { 0x05200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n)" },
1421 { 0x05400, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n)" },
1422 { 0x05600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n)" },
1423 { 0x05840, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP4AT" , "IPv4 Address Table" },
1424 { 0x05880, 0x00010, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "IP6AT" , "IPv6 Address Table" },
1425 { 0x05a00, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "WUPM" , "Wakeup Packet Memory" },
1426 { 0x05f00, 0x0001c, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFLT" , "Flexible Filter Length Table" },
1427 { 0x09000, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFMT" , "Flexible Filter Mask Table" },
1428 { 0x09800, 0x003fc, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "FFVT" , "Flexible Filter Value Table" },
1429 { 0x10000, 0x10000, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadUnimplemented, e1kRegWriteUnimplemented, "PBM" , "Packet Buffer Memory (n)" },
1430 { 0x00040, 0x00080, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadRA , e1kRegWriteRA , "RA" , "Receive Address (64-bit) (n) (82542)" },
1431 { 0x00200, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadMTA , e1kRegWriteMTA , "MTA" , "Multicast Table Array (n) (82542)" },
1432 { 0x00600, 0x00200, 0xFFFFFFFF, 0xFFFFFFFF, e1kRegReadVFTA , e1kRegWriteVFTA , "VFTA" , "VLAN Filter Table Array (n) (82542)" }
1433};
1434
1435#ifdef DEBUG
1436
1437/**
1438 * Convert U32 value to hex string. Masked bytes are replaced with dots.
1439 *
1440 * @remarks The mask has byte (not bit) granularity (e.g. 000000FF).
1441 *
1442 * @returns The buffer.
1443 *
1444 * @param u32 The word to convert into string.
1445 * @param mask Selects which bytes to convert.
1446 * @param buf Where to put the result.
1447 */
1448static char *e1kU32toHex(uint32_t u32, uint32_t mask, char *buf)
1449{
1450 for (char *ptr = buf + 7; ptr >= buf; --ptr, u32 >>=4, mask >>=4)
1451 {
1452 if (mask & 0xF)
1453 *ptr = (u32 & 0xF) + ((u32 & 0xF) > 9 ? '7' : '0');
1454 else
1455 *ptr = '.';
1456 }
1457 buf[8] = 0;
1458 return buf;
1459}
1460
1461/**
1462 * Returns timer name for debug purposes.
1463 *
1464 * @returns The timer name.
1465 *
1466 * @param pState The device state structure.
1467 * @param pTimer The timer to get the name for.
1468 */
1469DECLINLINE(const char *) e1kGetTimerName(E1KSTATE *pState, PTMTIMER pTimer)
1470{
1471 if (pTimer == pState->CTX_SUFF(pTIDTimer))
1472 return "TID";
1473 if (pTimer == pState->CTX_SUFF(pTADTimer))
1474 return "TAD";
1475 if (pTimer == pState->CTX_SUFF(pRIDTimer))
1476 return "RID";
1477 if (pTimer == pState->CTX_SUFF(pRADTimer))
1478 return "RAD";
1479 if (pTimer == pState->CTX_SUFF(pIntTimer))
1480 return "Int";
1481 return "unknown";
1482}
1483
1484#endif /* DEBUG */
1485
1486/**
1487 * Arm a timer.
1488 *
1489 * @param pState Pointer to the device state structure.
1490 * @param pTimer Pointer to the timer.
1491 * @param uExpireIn Expiration interval in microseconds.
1492 */
1493DECLINLINE(void) e1kArmTimer(E1KSTATE *pState, PTMTIMER pTimer, uint32_t uExpireIn)
1494{
1495 if (pState->fLocked)
1496 return;
1497
1498 E1kLog2(("%s Arming %s timer to fire in %d usec...\n",
1499 INSTANCE(pState), e1kGetTimerName(pState, pTimer), uExpireIn));
1500 TMTimerSet(pTimer, TMTimerFromMicro(pTimer, uExpireIn) +
1501 TMTimerGet(pTimer));
1502}
1503
1504/**
1505 * Cancel a timer.
1506 *
1507 * @param pState Pointer to the device state structure.
1508 * @param pTimer Pointer to the timer.
1509 */
1510DECLINLINE(void) e1kCancelTimer(E1KSTATE *pState, PTMTIMER pTimer)
1511{
1512 E1kLog2(("%s Stopping %s timer...\n",
1513 INSTANCE(pState), e1kGetTimerName(pState, pTimer)));
1514 int rc = TMTimerStop(pTimer);
1515 if (RT_FAILURE(rc))
1516 {
1517 E1kLog2(("%s e1kCancelTimer: TMTimerStop() failed with %Rrc\n",
1518 INSTANCE(pState), rc));
1519 }
1520}
1521
1522#define e1kCsEnter(ps, rc) PDMCritSectEnter(&ps->cs, rc)
1523#define e1kCsLeave(ps) PDMCritSectLeave(&ps->cs)
1524
1525#define e1kCsRxEnter(ps, rc) PDMCritSectEnter(&ps->csRx, rc)
1526#define e1kCsRxLeave(ps) PDMCritSectLeave(&ps->csRx)
1527
1528#ifndef E1K_WITH_TX_CS
1529#define e1kCsTxEnter(ps, rc) VINF_SUCCESS
1530#define e1kCsTxLeave(ps) do { } while (0)
1531#else /* E1K_WITH_TX_CS */
1532# define e1kCsTxEnter(ps, rc) PDMCritSectEnter(&ps->csTx, rc)
1533# define e1kCsTxLeave(ps) PDMCritSectLeave(&ps->csTx)
1534#endif /* E1K_WITH_TX_CS */
1535
1536#ifdef IN_RING3
1537
1538/**
1539 * Wake up the RX thread.
1540 */
1541static void e1kWakeupReceive(PPDMDEVINS pDevIns)
1542{
1543 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
1544 if ( pState->fMaybeOutOfSpace
1545 && pState->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
1546 {
1547 STAM_COUNTER_INC(&pState->StatRxOverflowWakeup);
1548 E1kLog(("%s Waking up Out-of-RX-space semaphore\n", INSTANCE(pState)));
1549 RTSemEventSignal(pState->hEventMoreRxDescAvail);
1550 }
1551}
1552
1553/**
1554 * Hardware reset. Revert all registers to initial values.
1555 *
1556 * @param pState The device state structure.
1557 */
1558static void e1kHardReset(E1KSTATE *pState)
1559{
1560 E1kLog(("%s Hard reset triggered\n", INSTANCE(pState)));
1561 memset(pState->auRegs, 0, sizeof(pState->auRegs));
1562 memset(pState->aRecAddr.au32, 0, sizeof(pState->aRecAddr.au32));
1563#ifdef E1K_INIT_RA0
1564 memcpy(pState->aRecAddr.au32, pState->macConfigured.au8,
1565 sizeof(pState->macConfigured.au8));
1566 pState->aRecAddr.array[0].ctl |= RA_CTL_AV;
1567#endif /* E1K_INIT_RA0 */
1568 STATUS = 0x0081; /* SPEED=10b (1000 Mb/s), FD=1b (Full Duplex) */
1569 EECD = 0x0100; /* EE_PRES=1b (EEPROM present) */
1570 CTRL = 0x0a09; /* FRCSPD=1b SPEED=10b LRST=1b FD=1b */
1571 TSPMT = 0x01000400;/* TSMT=0400h TSPBP=0100h */
1572 Assert(GET_BITS(RCTL, BSIZE) == 0);
1573 pState->u16RxBSize = 2048;
1574
1575 /* Reset promiscuous mode */
1576 if (pState->pDrvR3)
1577 pState->pDrvR3->pfnSetPromiscuousMode(pState->pDrvR3, false);
1578
1579#ifdef E1K_WITH_TXD_CACHE
1580 int rc = e1kCsTxEnter(pState, VERR_SEM_BUSY);
1581 if (RT_LIKELY(rc == VINF_SUCCESS))
1582 {
1583 pState->nTxDFetched = 0;
1584 pState->iTxDCurrent = 0;
1585 pState->fGSO = false;
1586 pState->cbTxAlloc = 0;
1587 e1kCsTxLeave(pState);
1588 }
1589#endif /* E1K_WITH_TXD_CACHE */
1590#ifdef E1K_WITH_RXD_CACHE
1591 rc = e1kCsRxEnter(pState, VERR_SEM_BUSY);
1592 if (RT_LIKELY(rc == VINF_SUCCESS))
1593 {
1594 pState->iRxDCurrent = pState->nRxDFetched = 0;
1595 e1kCsRxLeave(pState);
1596 }
1597#endif /* E1K_WITH_RXD_CACHE */
1598}
1599
1600#endif /* IN_RING3 */
1601
1602/**
1603 * Compute Internet checksum.
1604 *
1605 * @remarks Refer to http://www.netfor2.com/checksum.html for short intro.
1606 *
1607 * @param pvBuf The buffer holding the data to checksum.
1608 * @param cb The size of the buffer in bytes.
1611 *
1612 * @return The 1's complement of the 1's complement sum.
1613 *
1614 * @thread E1000_TX
1615 */
1616static uint16_t e1kCSum16(const void *pvBuf, size_t cb)
1617{
1618 uint32_t csum = 0;
1619 uint16_t *pu16 = (uint16_t *)pvBuf;
1620
1621 while (cb > 1)
1622 {
1623 csum += *pu16++;
1624 cb -= 2;
1625 }
1626 if (cb)
1627 csum += *(uint8_t*)pu16;
1628 while (csum >> 16)
1629 csum = (csum >> 16) + (csum & 0xFFFF);
1630 return ~csum;
1631}
1632
1633/**
1634 * Dump a packet to debug log.
1635 *
1636 * @param pState The device state structure.
1637 * @param cpPacket The packet.
1638 * @param cb The size of the packet.
1639 * @param cszText A string denoting direction of packet transfer.
1640 * @thread E1000_TX
1641 */
1642DECLINLINE(void) e1kPacketDump(E1KSTATE* pState, const uint8_t *cpPacket, size_t cb, const char *cszText)
1643{
1644#ifdef DEBUG
1645 if (RT_LIKELY(e1kCsEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
1646 {
1647 E1kLog(("%s --- %s packet #%d: ---\n",
1648 INSTANCE(pState), cszText, ++pState->u32PktNo));
1649 E1kLog3(("%.*Rhxd\n", cb, cpPacket));
1650 e1kCsLeave(pState);
1651 }
1652#else
1653 if (RT_LIKELY(e1kCsEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
1654 {
1655 E1kLogRel(("E1000: %s packet #%d, seq=%x ack=%x\n", cszText, pState->u32PktNo++, ntohl(*(uint32_t*)(cpPacket+0x26)), ntohl(*(uint32_t*)(cpPacket+0x2A))));
1656 e1kCsLeave(pState);
1657 }
1658#endif
1659}
1660
1661/**
1662 * Determine the type of transmit descriptor.
1663 *
1664 * @returns Descriptor type. See E1K_DTYP_XXX defines.
1665 *
1666 * @param pDesc Pointer to descriptor union.
1667 * @thread E1000_TX
1668 */
1669DECLINLINE(int) e1kGetDescType(E1KTXDESC* pDesc)
1670{
1671 if (pDesc->legacy.cmd.fDEXT)
1672 return pDesc->context.dw2.u4DTYP;
1673 return E1K_DTYP_LEGACY;
1674}
1675
1676/**
1677 * Dump receive descriptor to debug log.
1678 *
1679 * @param pState The device state structure.
1680 * @param pDesc Pointer to the descriptor.
1681 * @thread E1000_RX
1682 */
1683static void e1kPrintRDesc(E1KSTATE* pState, E1KRXDESC* pDesc)
1684{
1685 E1kLog2(("%s <-- Receive Descriptor (%d bytes):\n", INSTANCE(pState), pDesc->u16Length));
1686 E1kLog2((" Address=%16LX Length=%04X Csum=%04X\n",
1687 pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum));
1688 E1kLog2((" STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
1689 pDesc->status.fPIF ? "PIF" : "pif",
1690 pDesc->status.fIPCS ? "IPCS" : "ipcs",
1691 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
1692 pDesc->status.fVP ? "VP" : "vp",
1693 pDesc->status.fIXSM ? "IXSM" : "ixsm",
1694 pDesc->status.fEOP ? "EOP" : "eop",
1695 pDesc->status.fDD ? "DD" : "dd",
1696 pDesc->status.fRXE ? "RXE" : "rxe",
1697 pDesc->status.fIPE ? "IPE" : "ipe",
1698 pDesc->status.fTCPE ? "TCPE" : "tcpe",
1699 pDesc->status.fCE ? "CE" : "ce",
1700 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
1701 E1K_SPEC_VLAN(pDesc->status.u16Special),
1702 E1K_SPEC_PRI(pDesc->status.u16Special)));
1703}
1704
1705/**
1706 * Dump transmit descriptor to debug log.
1707 *
1708 * @param pState The device state structure.
1709 * @param pDesc Pointer to descriptor union.
1710 * @param cszDir A string denoting direction of descriptor transfer
1711 * @thread E1000_TX
1712 */
1713static void e1kPrintTDesc(E1KSTATE* pState, E1KTXDESC* pDesc, const char* cszDir,
1714 unsigned uLevel = RTLOGGRPFLAGS_LEVEL_2)
1715{
1716 switch (e1kGetDescType(pDesc))
1717 {
1718 case E1K_DTYP_CONTEXT:
1719 E1kLogX(uLevel, ("%s %s Context Transmit Descriptor %s\n",
1720 INSTANCE(pState), cszDir, cszDir));
1721 E1kLogX(uLevel, (" IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
1722 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
1723 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE));
1724 E1kLogX(uLevel, (" TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
1725 pDesc->context.dw2.fIDE ? " IDE":"",
1726 pDesc->context.dw2.fRS ? " RS" :"",
1727 pDesc->context.dw2.fTSE ? " TSE":"",
1728 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
1729 pDesc->context.dw2.fTCP ? "TCP":"UDP",
1730 pDesc->context.dw2.u20PAYLEN,
1731 pDesc->context.dw3.u8HDRLEN,
1732 pDesc->context.dw3.u16MSS,
1733 pDesc->context.dw3.fDD?"DD":""));
1734 break;
1735 case E1K_DTYP_DATA:
1736 E1kLogX(uLevel, ("%s %s Data Transmit Descriptor (%d bytes) %s\n",
1737 INSTANCE(pState), cszDir, pDesc->data.cmd.u20DTALEN, cszDir));
1738 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1739 pDesc->data.u64BufAddr,
1740 pDesc->data.cmd.u20DTALEN));
1741 E1kLogX(uLevel, (" DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
1742 pDesc->data.cmd.fIDE ? " IDE" :"",
1743 pDesc->data.cmd.fVLE ? " VLE" :"",
1744 pDesc->data.cmd.fRPS ? " RPS" :"",
1745 pDesc->data.cmd.fRS ? " RS" :"",
1746 pDesc->data.cmd.fTSE ? " TSE" :"",
1747 pDesc->data.cmd.fIFCS? " IFCS":"",
1748 pDesc->data.cmd.fEOP ? " EOP" :"",
1749 pDesc->data.dw3.fDD ? " DD" :"",
1750 pDesc->data.dw3.fEC ? " EC" :"",
1751 pDesc->data.dw3.fLC ? " LC" :"",
1752 pDesc->data.dw3.fTXSM? " TXSM":"",
1753 pDesc->data.dw3.fIXSM? " IXSM":"",
1754 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
1755 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
1756 E1K_SPEC_PRI(pDesc->data.dw3.u16Special)));
1757 break;
1758 case E1K_DTYP_LEGACY:
1759 E1kLogX(uLevel, ("%s %s Legacy Transmit Descriptor (%d bytes) %s\n",
1760 INSTANCE(pState), cszDir, pDesc->legacy.cmd.u16Length, cszDir));
1761 E1kLogX(uLevel, (" Address=%16LX DTALEN=%05X\n",
1762 pDesc->data.u64BufAddr,
1763 pDesc->legacy.cmd.u16Length));
1764 E1kLogX(uLevel, (" CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
1765 pDesc->legacy.cmd.fIDE ? " IDE" :"",
1766 pDesc->legacy.cmd.fVLE ? " VLE" :"",
1767 pDesc->legacy.cmd.fRPS ? " RPS" :"",
1768 pDesc->legacy.cmd.fRS ? " RS" :"",
1769 pDesc->legacy.cmd.fIC ? " IC" :"",
1770 pDesc->legacy.cmd.fIFCS? " IFCS":"",
1771 pDesc->legacy.cmd.fEOP ? " EOP" :"",
1772 pDesc->legacy.dw3.fDD ? " DD" :"",
1773 pDesc->legacy.dw3.fEC ? " EC" :"",
1774 pDesc->legacy.dw3.fLC ? " LC" :"",
1775 pDesc->legacy.cmd.u8CSO,
1776 pDesc->legacy.dw3.u8CSS,
1777 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
1778 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
1779 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special)));
1780 break;
1781 default:
1782 E1kLog(("%s %s Invalid Transmit Descriptor %s\n",
1783 INSTANCE(pState), cszDir, cszDir));
1784 break;
1785 }
1786}
1787
1788/**
1789 * Raise interrupt if not masked.
1790 *
1791 * @param pState The device state structure.
1792 */
1793static int e1kRaiseInterrupt(E1KSTATE *pState, int rcBusy, uint32_t u32IntCause = 0)
1794{
1795 int rc = e1kCsEnter(pState, rcBusy);
1796 if (RT_UNLIKELY(rc != VINF_SUCCESS))
1797 return rc;
1798
1799 E1K_INC_ISTAT_CNT(pState->uStatIntTry);
1800 ICR |= u32IntCause;
1801 if (ICR & IMS)
1802 {
1803#if 0
1804 if (pState->fDelayInts)
1805 {
1806 E1K_INC_ISTAT_CNT(pState->uStatIntDly);
1807 pState->iStatIntLostOne = 1;
1808 E1kLog2(("%s e1kRaiseInterrupt: Delayed. ICR=%08x\n",
1809 INSTANCE(pState), ICR));
1810#define E1K_LOST_IRQ_THRSLD 20
1811//#define E1K_LOST_IRQ_THRSLD 200000000
1812 if (pState->iStatIntLost >= E1K_LOST_IRQ_THRSLD)
1813 {
1814 E1kLog2(("%s WARNING! Disabling delayed interrupt logic: delayed=%d, delivered=%d\n",
1815 INSTANCE(pState), pState->uStatIntDly, pState->uStatIntLate));
1816 pState->fIntMaskUsed = false;
1817 pState->uStatDisDly++;
1818 }
1819 }
1820 else
1821#endif
1822 if (pState->fIntRaised)
1823 {
1824 E1K_INC_ISTAT_CNT(pState->uStatIntSkip);
1825 E1kLog2(("%s e1kRaiseInterrupt: Already raised, skipped. ICR&IMS=%08x\n",
1826 INSTANCE(pState), ICR & IMS));
1827 }
1828 else
1829 {
1830#ifdef E1K_ITR_ENABLED
1831 uint64_t tstamp = TMTimerGet(pState->CTX_SUFF(pIntTimer));
1832 /* interrupts/sec = 1 / (ITR * 256 ns) */
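            /* E.g. (illustrative only) ITR = 40 limits the rate to at most one
             * interrupt per 40 * 256 = 10240 ns, i.e. about 98000 per second. */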
1833 E1kLog2(("%s e1kRaiseInterrupt: tstamp - pState->u64AckedAt = %d, ITR * 256 = %d\n",
1834 INSTANCE(pState), (uint32_t)(tstamp - pState->u64AckedAt), ITR * 256));
1835 if (!!ITR && pState->fIntMaskUsed && tstamp - pState->u64AckedAt < ITR * 256)
1836 {
1837 E1K_INC_ISTAT_CNT(pState->uStatIntEarly);
1838 E1kLog2(("%s e1kRaiseInterrupt: Too early to raise again: %d ns < %d ns.\n",
1839 INSTANCE(pState), (uint32_t)(tstamp - pState->u64AckedAt), ITR * 256));
1840 }
1841 else
1842#endif
1843 {
1844
1845 /* Since we are delivering the interrupt now
1846 * there is no need to do it later -- stop the timer.
1847 */
1848 TMTimerStop(pState->CTX_SUFF(pIntTimer));
1849 E1K_INC_ISTAT_CNT(pState->uStatInt);
1850 STAM_COUNTER_INC(&pState->StatIntsRaised);
1851 /* Got at least one unmasked interrupt cause */
1852 pState->fIntRaised = true;
1853 /* Raise(1) INTA(0) */
1854 E1kLogRel(("E1000: irq RAISED icr&mask=0x%x, icr=0x%x\n", ICR & IMS, ICR));
1855 PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 1);
1856 E1kLog(("%s e1kRaiseInterrupt: Raised. ICR&IMS=%08x\n",
1857 INSTANCE(pState), ICR & IMS));
1858 }
1859 }
1860 }
1861 else
1862 {
1863 E1K_INC_ISTAT_CNT(pState->uStatIntMasked);
1864 E1kLog2(("%s e1kRaiseInterrupt: Not raising, ICR=%08x, IMS=%08x\n",
1865 INSTANCE(pState), ICR, IMS));
1866 }
1867 e1kCsLeave(pState);
1868 return VINF_SUCCESS;
1869}
1870
1871/**
1872 * Compute the physical address of the descriptor.
1873 *
1874 * @returns the physical address of the descriptor.
1875 *
1876 * @param baseHigh High-order 32 bits of descriptor table address.
1877 * @param baseLow Low-order 32 bits of descriptor table address.
1878 * @param idxDesc The descriptor index in the table.
1879 */
1880DECLINLINE(RTGCPHYS) e1kDescAddr(uint32_t baseHigh, uint32_t baseLow, uint32_t idxDesc)
1881{
1882 AssertCompile(sizeof(E1KRXDESC) == sizeof(E1KTXDESC));
1883 return ((uint64_t)baseHigh << 32) + baseLow + idxDesc * sizeof(E1KRXDESC);
1884}
1885
1886#ifdef E1K_WITH_RXD_CACHE
1887/**
1888 * Returns the number of RX descriptors that belong to the hardware.
1889 *
1890 * @returns the number of available descriptors in the RX ring.
1891 * @param pState The device state structure.
1892 * @thread ???
1893 */
1894DECLINLINE(uint32_t) e1kGetRxLen(E1KSTATE* pState)
1895{
1896 /*
1897 * Make sure RDT won't change during computation. EMT may modify RDT at
1898 * any moment.
1899 */
1900 uint32_t rdt = RDT;
1901 return (RDH > rdt ? RDLEN/sizeof(E1KRXDESC) : 0) + rdt - RDH;
1902}
1903
1904DECLINLINE(unsigned) e1kRxDInCache(E1KSTATE* pState)
1905{
1906 return pState->nRxDFetched > pState->iRxDCurrent ?
1907 pState->nRxDFetched - pState->iRxDCurrent : 0;
1908}
1909
1910DECLINLINE(unsigned) e1kRxDIsCacheEmpty(E1KSTATE* pState)
1911{
1912 return pState->iRxDCurrent >= pState->nRxDFetched;
1913}
1914
1915/**
1916 * Load receive descriptors from guest memory. The caller needs to be in Rx
1917 * critical section.
1918 *
1919 * We need two physical reads in case the tail has wrapped around the end
1920 * of the RX descriptor ring.
1921 *
1922 * @returns the actual number of descriptors fetched.
1923 * @param pState The device state structure.
1926 * @thread EMT, RX
1927 */
1928DECLINLINE(unsigned) e1kRxDPrefetch(E1KSTATE* pState)
1929{
1930 /* We've already loaded pState->nRxDFetched descriptors past RDH. */
1931 unsigned nDescsAvailable = e1kGetRxLen(pState) - e1kRxDInCache(pState);
1932 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_RXD_CACHE_SIZE - pState->nRxDFetched);
1933 unsigned nDescsTotal = RDLEN / sizeof(E1KRXDESC);
1934 Assert(nDescsTotal != 0);
1935 if (nDescsTotal == 0)
1936 return 0;
1937 unsigned nFirstNotLoaded = (RDH + e1kRxDInCache(pState)) % nDescsTotal;
1938 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
1939 E1kLog3(("%s e1kRxDPrefetch: nDescsAvailable=%u nDescsToFetch=%u "
1940 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
1941 INSTANCE(pState), nDescsAvailable, nDescsToFetch, nDescsTotal,
1942 nFirstNotLoaded, nDescsInSingleRead));
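    /* E.g. (illustrative only): with an 8-descriptor ring, nFirstNotLoaded = 6
     * and nDescsToFetch = 4, the read below covers descriptors 6-7 and the
     * wrap-around read further down covers descriptors 0-1. */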
1943 if (nDescsToFetch == 0)
1944 return 0;
1945 E1KRXDESC* pFirstEmptyDesc = &pState->aRxDescriptors[pState->nRxDFetched];
1946 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
1947 ((uint64_t)RDBAH << 32) + RDBAL + nFirstNotLoaded * sizeof(E1KRXDESC),
1948 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KRXDESC));
1949 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x(0x%x), RDLEN=%08x, RDH=%08x, RDT=%08x\n",
1950 INSTANCE(pState), nDescsInSingleRead,
1951 RDBAH, RDBAL + RDH * sizeof(E1KRXDESC),
1952 nFirstNotLoaded, RDLEN, RDH, RDT));
1953 if (nDescsToFetch > nDescsInSingleRead)
1954 {
1955 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
1956 ((uint64_t)RDBAH << 32) + RDBAL,
1957 pFirstEmptyDesc + nDescsInSingleRead,
1958 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KRXDESC));
1959 E1kLog3(("%s Fetched %u RX descriptors at %08x%08x\n",
1960 INSTANCE(pState), nDescsToFetch - nDescsInSingleRead,
1961 RDBAH, RDBAL));
1962 }
1963 pState->nRxDFetched += nDescsToFetch;
1964 return nDescsToFetch;
1965}
1966
1967DECLINLINE(E1KRXDESC*) e1kRxDGet(E1KSTATE* pState)
1968{
1969 /* Check the cache first. */
1970 if (pState->iRxDCurrent < pState->nRxDFetched)
1971 return &pState->aRxDescriptors[pState->iRxDCurrent++];
1972 /* Cache is empty, reset it and check if we can fetch more. */
1973 pState->iRxDCurrent = pState->nRxDFetched = 0;
1974 if (e1kRxDPrefetch(pState))
1975 return &pState->aRxDescriptors[pState->iRxDCurrent++];
1976 /* Out of Rx descriptors. */
1977 return NULL;
1978}
1979#endif /* E1K_WITH_RXD_CACHE */
1980
1981/**
1982 * Advance the head pointer of the receive descriptor queue.
1983 *
1984 * @remarks RDH always points to the next available RX descriptor.
1985 *
1986 * @param pState The device state structure.
1987 */
1988DECLINLINE(void) e1kAdvanceRDH(E1KSTATE *pState)
1989{
1990 //e1kCsEnter(pState, RT_SRC_POS);
1991 if (++RDH * sizeof(E1KRXDESC) >= RDLEN)
1992 RDH = 0;
1993 /*
1994 * Compute current receive queue length and fire RXDMT0 interrupt
1995 * if we are low on receive buffers
1996 */
1997 uint32_t uRQueueLen = RDH>RDT ? RDLEN/sizeof(E1KRXDESC)-RDH+RDT : RDT-RDH;
1998 /*
1999 * The minimum threshold is controlled by RDMTS bits of RCTL:
2000 * 00 = 1/2 of RDLEN
2001 * 01 = 1/4 of RDLEN
2002 * 10 = 1/8 of RDLEN
2003 * 11 = reserved
2004 */
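    /* For example (illustrative only, assuming 16-byte descriptors):
     * RDLEN = 4096 (256 descriptors) with RDMTS = 01b gives a threshold of
     * 256 / 4 = 64 free descriptors. */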
2005 uint32_t uMinRQThreshold = RDLEN / sizeof(E1KRXDESC) / (2 << GET_BITS(RCTL, RDMTS));
2006 if (uRQueueLen <= uMinRQThreshold)
2007 {
2008 E1kLogRel(("E1000: low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x\n", RDH, RDT, uRQueueLen, uMinRQThreshold));
2009 E1kLog2(("%s Low on RX descriptors, RDH=%x RDT=%x len=%x threshold=%x, raise an interrupt\n",
2010 INSTANCE(pState), RDH, RDT, uRQueueLen, uMinRQThreshold));
2011 E1K_INC_ISTAT_CNT(pState->uStatIntRXDMT0);
2012 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXDMT0);
2013 }
2014 E1kLog2(("%s e1kAdvanceRDH: at exit RDH=%x RDT=%x len=%x\n",
2015 INSTANCE(pState), RDH, RDT, uRQueueLen));
2016 //e1kCsLeave(pState);
2017}
2018
2019#ifndef E1K_WITH_RXD_CACHE
2020/**
2021 * Store a fragment of a received packet that fits into the next available RX
2022 * buffer.
2023 *
2024 * @remarks Trigger the RXT0 interrupt if it is the last fragment of the packet.
2025 *
2026 * @param pState The device state structure.
2027 * @param pDesc The next available RX descriptor.
2028 * @param pvBuf The fragment.
2029 * @param cb The size of the fragment.
2030 */
2031static DECLCALLBACK(void) e1kStoreRxFragment(E1KSTATE *pState, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2032{
2033 STAM_PROFILE_ADV_START(&pState->StatReceiveStore, a);
2034 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n", pState->szInstance, cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2035 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2036 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2037 /* Write back the descriptor */
2038 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH), pDesc, sizeof(E1KRXDESC));
2039 e1kPrintRDesc(pState, pDesc);
2040 E1kLogRel(("E1000: Wrote back RX desc, RDH=%x\n", RDH));
2041 /* Advance head */
2042 e1kAdvanceRDH(pState);
2043 //E1kLog2(("%s e1kStoreRxFragment: EOP=%d RDTR=%08X RADV=%08X\n", INSTANCE(pState), pDesc->fEOP, RDTR, RADV));
2044 if (pDesc->status.fEOP)
2045 {
2046 /* Complete packet has been stored -- it is time to let the guest know. */
2047#ifdef E1K_USE_RX_TIMERS
2048 if (RDTR)
2049 {
2050 /* Arm the timer to fire in RDTR usec (discard .024) */
2051 e1kArmTimer(pState, pState->CTX_SUFF(pRIDTimer), RDTR);
2052 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2053 if (RADV != 0 && !TMTimerIsActive(pState->CTX_SUFF(pRADTimer)))
2054 e1kArmTimer(pState, pState->CTX_SUFF(pRADTimer), RADV);
2055 }
2056 else
2057 {
2058#endif
2059 /* 0 delay means immediate interrupt */
2060 E1K_INC_ISTAT_CNT(pState->uStatIntRx);
2061 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXT0);
2062#ifdef E1K_USE_RX_TIMERS
2063 }
2064#endif
2065 }
2066 STAM_PROFILE_ADV_STOP(&pState->StatReceiveStore, a);
2067}
2068#else /* E1K_WITH_RXD_CACHE */
2069/**
2070 * Store a fragment of a received packet at the specified address.
2071 *
2072 * @param pState The device state structure.
2073 * @param pDesc The next available RX descriptor.
2074 * @param pvBuf The fragment.
2075 * @param cb The size of the fragment.
2076 */
2077static DECLCALLBACK(void) e1kStoreRxFragment(E1KSTATE *pState, E1KRXDESC *pDesc, const void *pvBuf, size_t cb)
2078{
2079 STAM_PROFILE_ADV_START(&pState->StatReceiveStore, a);
2080 E1kLog2(("%s e1kStoreRxFragment: store fragment of %04X at %016LX, EOP=%d\n",
2081 INSTANCE(pState), cb, pDesc->u64BufAddr, pDesc->status.fEOP));
2082 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), pDesc->u64BufAddr, pvBuf, cb);
2083 pDesc->u16Length = (uint16_t)cb; Assert(pDesc->u16Length == cb);
2084 STAM_PROFILE_ADV_STOP(&pState->StatReceiveStore, a);
2085}
2086#endif /* E1K_WITH_RXD_CACHE */
2087
2088/**
2089 * Returns true if it is a broadcast packet.
2090 *
2091 * @returns true if destination address indicates broadcast.
2092 * @param pvBuf The ethernet packet.
2093 */
2094DECLINLINE(bool) e1kIsBroadcast(const void *pvBuf)
2095{
2096 static const uint8_t s_abBcastAddr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2097 return memcmp(pvBuf, s_abBcastAddr, sizeof(s_abBcastAddr)) == 0;
2098}
2099
2100/**
2101 * Returns true if it is a multicast packet.
2102 *
2103 * @remarks returns true for broadcast packets as well.
2104 * @returns true if destination address indicates multicast.
2105 * @param pvBuf The ethernet packet.
2106 */
2107DECLINLINE(bool) e1kIsMulticast(const void *pvBuf)
2108{
2109 return (*(char*)pvBuf) & 1;
2110}
2111
2112/**
2113 * Set IXSM, IPCS and TCPCS flags according to the packet type.
2114 *
2115 * @remarks We emulate checksum offloading for major packet types only.
2116 *
2117 * @returns VBox status code.
2118 * @param pState The device state structure.
2119 * @param pFrame The frame data.
2120 * @param cb The size of the frame in bytes.
2121 * @param pStatus Where to set the receive status bits.
2122 */
2123static int e1kRxChecksumOffload(E1KSTATE* pState, const uint8_t *pFrame, size_t cb, E1KRXDST *pStatus)
2124{
2125 /** @todo
2126 * It is not safe to bypass checksum verification for packets coming
2127 * from the real wire. We are currently unable to tell where packets
2128 * come from, so we tell the driver to ignore our checksum flags
2129 * and do the verification in software.
2130 */
2131#if 0
2132 uint16_t uEtherType = ntohs(*(uint16_t*)(pFrame + 12));
2133
2134 E1kLog2(("%s e1kRxChecksumOffload: EtherType=%x\n", INSTANCE(pState), uEtherType));
2135
2136 switch (uEtherType)
2137 {
2138 case 0x800: /* IPv4 */
2139 {
2140 pStatus->fIXSM = false;
2141 pStatus->fIPCS = true;
2142 PRTNETIPV4 pIpHdr4 = (PRTNETIPV4)(pFrame + 14);
2143 /* TCP/UDP checksum offloading works with TCP and UDP only */
2144 pStatus->fTCPCS = pIpHdr4->ip_p == 6 || pIpHdr4->ip_p == 17;
2145 break;
2146 }
2147 case 0x86DD: /* IPv6 */
2148 pStatus->fIXSM = false;
2149 pStatus->fIPCS = false;
2150 pStatus->fTCPCS = true;
2151 break;
2152 default: /* ARP, VLAN, etc. */
2153 pStatus->fIXSM = true;
2154 break;
2155 }
2156#else
2157 pStatus->fIXSM = true;
2158#endif
2159 return VINF_SUCCESS;
2160}
2161
2162/**
2163 * Pad and store received packet.
2164 *
2165 * @remarks Make sure that the packet appears to the upper layer as one coming
2166 * from real Ethernet: pad it and insert FCS.
2167 *
2168 * @returns VBox status code.
2169 * @param pState The device state structure.
2170 * @param pvBuf The available data.
2171 * @param cb Number of bytes available in the buffer.
2172 * @param status Bit fields containing status info.
2173 */
2174static int e1kHandleRxPacket(E1KSTATE* pState, const void *pvBuf, size_t cb, E1KRXDST status)
2175{
2176#if defined(IN_RING3) /** @todo Remove this extra copying, it's gonna make us run out of kernel / hypervisor stack! */
2177 uint8_t rxPacket[E1K_MAX_RX_PKT_SIZE];
2178 uint8_t *ptr = rxPacket;
2179
2180 int rc = e1kCsRxEnter(pState, VERR_SEM_BUSY);
2181 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2182 return rc;
2183
2184 if (cb > 70) /* unqualified guess */
2185 pState->led.Asserted.s.fReading = pState->led.Actual.s.fReading = 1;
2186
2187 Assert(cb <= E1K_MAX_RX_PKT_SIZE);
2188 Assert(cb > 16);
2189 size_t cbMax = ((RCTL & RCTL_LPE) ? E1K_MAX_RX_PKT_SIZE - 4 : 1518) - (status.fVP ? 0 : 4);
2190 E1kLog3(("%s Max RX packet size is %u\n", INSTANCE(pState), cbMax));
2191 if (status.fVP)
2192 {
2193 /* VLAN packet -- strip VLAN tag in VLAN mode */
2194 if ((CTRL & CTRL_VME) && cb > 16)
2195 {
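            /* 802.1Q layout (illustrative): bytes 0-11 hold DA+SA, bytes 12-13
             * the TPID (0x8100), bytes 14-15 the TCI extracted below as
             * u16Ptr[7], and byte 16 onwards the original EtherType/payload. */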
2196 uint16_t *u16Ptr = (uint16_t*)pvBuf;
2197 memcpy(rxPacket, pvBuf, 12); /* Copy src and dst addresses */
2198 status.u16Special = RT_BE2H_U16(u16Ptr[7]); /* Extract VLAN tag */
2199 memcpy(rxPacket + 12, (uint8_t*)pvBuf + 16, cb - 16); /* Copy the rest of the packet */
2200 cb -= 4;
2201 E1kLog3(("%s Stripped tag for VLAN %u (cb=%u)\n",
2202 INSTANCE(pState), status.u16Special, cb));
2203 }
2204 else
2205 status.fVP = false; /* Set VP only if we stripped the tag */
2206 }
2207 else
2208 memcpy(rxPacket, pvBuf, cb);
2209 /* Pad short packets */
2210 if (cb < 60)
2211 {
2212 memset(rxPacket + cb, 0, 60 - cb);
2213 cb = 60;
2214 }
2215 if (!(RCTL & RCTL_SECRC) && cb <= cbMax)
2216 {
2217 STAM_PROFILE_ADV_START(&pState->StatReceiveCRC, a);
2218 /*
2219 * Add FCS if CRC stripping is not enabled. Since the value of the CRC
2220 * is ignored by most drivers, we may as well save ourselves the trouble
2221 * of calculating it (see the EthernetCRC CFGM parameter).
2222 */
2223 if (pState->fEthernetCRC)
2224 *(uint32_t*)(rxPacket + cb) = RTCrc32(rxPacket, cb);
2225 cb += sizeof(uint32_t);
2226 STAM_PROFILE_ADV_STOP(&pState->StatReceiveCRC, a);
2227 E1kLog3(("%s Added FCS (cb=%u)\n", INSTANCE(pState), cb));
2228 }
2229 /* Compute checksum of complete packet */
2230 uint16_t checksum = e1kCSum16(rxPacket + GET_BITS(RXCSUM, PCSS), cb);
2231 e1kRxChecksumOffload(pState, rxPacket, cb, &status);
2232
2233 /* Update stats */
2234 E1K_INC_CNT32(GPRC);
2235 if (e1kIsBroadcast(pvBuf))
2236 E1K_INC_CNT32(BPRC);
2237 else if (e1kIsMulticast(pvBuf))
2238 E1K_INC_CNT32(MPRC);
2239 /* Update octet receive counter */
2240 E1K_ADD_CNT64(GORCL, GORCH, cb);
2241 STAM_REL_COUNTER_ADD(&pState->StatReceiveBytes, cb);
2242 if (cb == 64)
2243 E1K_INC_CNT32(PRC64);
2244 else if (cb < 128)
2245 E1K_INC_CNT32(PRC127);
2246 else if (cb < 256)
2247 E1K_INC_CNT32(PRC255);
2248 else if (cb < 512)
2249 E1K_INC_CNT32(PRC511);
2250 else if (cb < 1024)
2251 E1K_INC_CNT32(PRC1023);
2252 else
2253 E1K_INC_CNT32(PRC1522);
2254
2255 E1K_INC_ISTAT_CNT(pState->uStatRxFrm);
2256
2257#ifdef E1K_WITH_RXD_CACHE
2258 while (cb > 0)
2259 {
2260 E1KRXDESC *pDesc = e1kRxDGet(pState);
2261
2262 if (pDesc == NULL)
2263 {
2264 E1kLog(("%s Out of receive buffers, dropping the packet "
2265 "(cb=%u, in_cache=%u, RDH=%x RDT=%x)\n",
2266 INSTANCE(pState), cb, e1kRxDInCache(pState), RDH, RDT));
2267 break;
2268 }
2269#else /* !E1K_WITH_RXD_CACHE */
2270 if (RDH == RDT)
2271 {
2272 E1kLog(("%s Out of receive buffers, dropping the packet\n",
2273 INSTANCE(pState)));
2274 }
2275 /* Store the packet to receive buffers */
2276 while (RDH != RDT)
2277 {
2278 /* Load the descriptor pointed by head */
2279 E1KRXDESC desc, *pDesc = &desc;
2280 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
2281 &desc, sizeof(desc));
2282#endif /* !E1K_WITH_RXD_CACHE */
2283 if (pDesc->u64BufAddr)
2284 {
2285 /* Update descriptor */
2286 pDesc->status = status;
2287 pDesc->u16Checksum = checksum;
2288 pDesc->status.fDD = true;
2289
2290 /*
2291 * We need to leave Rx critical section here or we risk deadlocking
2292 * with EMT in e1kRegWriteRDT when the write is to an unallocated
2293 * page or has an access handler associated with it.
2294 * Note that it is safe to leave the critical section here since
2295 * e1kRegWriteRDT() never modifies RDH. It never touches already
2296 * fetched RxD cache entries either.
2297 */
2298 if (cb > pState->u16RxBSize)
2299 {
2300 pDesc->status.fEOP = false;
2301 e1kCsRxLeave(pState);
2302 e1kStoreRxFragment(pState, pDesc, ptr, pState->u16RxBSize);
2303 rc = e1kCsRxEnter(pState, VERR_SEM_BUSY);
2304 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2305 return rc;
2306 ptr += pState->u16RxBSize;
2307 cb -= pState->u16RxBSize;
2308 }
2309 else
2310 {
2311 pDesc->status.fEOP = true;
2312 e1kCsRxLeave(pState);
2313 e1kStoreRxFragment(pState, pDesc, ptr, cb);
2314#ifdef E1K_WITH_RXD_CACHE
2315 rc = e1kCsRxEnter(pState, VERR_SEM_BUSY);
2316 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2317 return rc;
2318 cb = 0;
2319#else /* !E1K_WITH_RXD_CACHE */
2320 pState->led.Actual.s.fReading = 0;
2321 return VINF_SUCCESS;
2322#endif /* !E1K_WITH_RXD_CACHE */
2323 }
2324 /*
2325 * Note: RDH is advanced by e1kStoreRxFragment if E1K_WITH_RXD_CACHE
2326 * is not defined.
2327 */
2328 }
2329#ifndef E1K_WITH_RXD_CACHE
2330 else
2331 {
2332#endif /* !E1K_WITH_RXD_CACHE */
2333 /* Write back the descriptor. */
2334 pDesc->status.fDD = true;
2335 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns),
2336 e1kDescAddr(RDBAH, RDBAL, RDH),
2337 pDesc, sizeof(E1KRXDESC));
2338 e1kAdvanceRDH(pState);
2339 e1kPrintRDesc(pState, pDesc);
2340#ifndef E1K_WITH_RXD_CACHE
2341 }
2342#endif /* !E1K_WITH_RXD_CACHE */
2343 }
2344
2345 if (cb > 0)
2346 E1kLog(("%s Out of receive buffers, dropping %u bytes\n", INSTANCE(pState), cb));
2347
2348 pState->led.Actual.s.fReading = 0;
2349
2350 e1kCsRxLeave(pState);
2351#ifdef E1K_WITH_RXD_CACHE
2352 /* Complete packet has been stored -- it is time to let the guest know. */
2353# ifdef E1K_USE_RX_TIMERS
2354 if (RDTR)
2355 {
2356 /* Arm the timer to fire in RDTR usec (discard .024) */
2357 e1kArmTimer(pState, pState->CTX_SUFF(pRIDTimer), RDTR);
2358 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
2359 if (RADV != 0 && !TMTimerIsActive(pState->CTX_SUFF(pRADTimer)))
2360 e1kArmTimer(pState, pState->CTX_SUFF(pRADTimer), RADV);
2361 }
2362 else
2363 {
2364# endif /* E1K_USE_RX_TIMERS */
2365 /* 0 delay means immediate interrupt */
2366 E1K_INC_ISTAT_CNT(pState->uStatIntRx);
2367 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_RXT0);
2368# ifdef E1K_USE_RX_TIMERS
2369 }
2370# endif /* E1K_USE_RX_TIMERS */
2371#endif /* E1K_WITH_RXD_CACHE */
2372
2373 return VINF_SUCCESS;
2374#else
2375 return VERR_INTERNAL_ERROR_2;
2376#endif
2377}
2378
2379
2380/**
2381 * Bring the link up after the configured delay, 5 seconds by default.
2382 *
2383 * @param pState The device state structure.
2384 * @thread any
2385 */
2386DECLINLINE(void) e1kBringLinkUpDelayed(E1KSTATE* pState)
2387{
2388 E1kLog(("%s Will bring up the link in %d seconds...\n",
2389 INSTANCE(pState), pState->cMsLinkUpDelay / 1000));
2390 e1kArmTimer(pState, pState->CTX_SUFF(pLUTimer), pState->cMsLinkUpDelay * 1000);
2391}
2392
2393#if 0 /* unused */
2394/**
2395 * Read handler for Device Control register.
2396 *
2397 * Gets the MDIO pin state from the PHY when MDIO is configured as input.
2398 *
2399 * @returns VBox status code.
2400 *
2401 * @param pState The device state structure.
2402 * @param offset Register offset in memory-mapped frame.
2403 * @param index Register index in register array.
2404 * @param mask Used to implement partial reads (8 and 16-bit).
2405 */
2406static int e1kRegReadCTRL(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2407{
2408 E1kLog(("%s e1kRegReadCTRL: mdio dir=%s mdc dir=%s mdc=%d\n",
2409 INSTANCE(pState), (CTRL & CTRL_MDIO_DIR)?"OUT":"IN ",
2410 (CTRL & CTRL_MDC_DIR)?"OUT":"IN ", !!(CTRL & CTRL_MDC)));
2411 if ((CTRL & CTRL_MDIO_DIR) == 0 && (CTRL & CTRL_MDC))
2412 {
2413 /* MDC is high and MDIO pin is used for input, read MDIO pin from PHY */
2414 if (Phy::readMDIO(&pState->phy))
2415 *pu32Value = CTRL | CTRL_MDIO;
2416 else
2417 *pu32Value = CTRL & ~CTRL_MDIO;
2418 E1kLog(("%s e1kRegReadCTRL: Phy::readMDIO(%d)\n",
2419 INSTANCE(pState), !!(*pu32Value & CTRL_MDIO)));
2420 }
2421 else
2422 {
2423 /* MDIO pin is used for output, ignore it */
2424 *pu32Value = CTRL;
2425 }
2426 return VINF_SUCCESS;
2427}
2428#endif /* unused */
2429
2430/**
2431 * Write handler for Device Control register.
2432 *
2433 * Handles reset.
2434 *
2435 * @param pState The device state structure.
2436 * @param offset Register offset in memory-mapped frame.
2437 * @param index Register index in register array.
2438 * @param value The value to store.
2439 * @param mask Used to implement partial writes (8 and 16-bit).
2440 * @thread EMT
2441 */
2442static int e1kRegWriteCTRL(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2443{
2444 int rc = VINF_SUCCESS;
2445
2446 if (value & CTRL_RESET)
2447 { /* RST */
2448#ifndef IN_RING3
2449 return VINF_IOM_R3_IOPORT_WRITE;
2450#else
2451 e1kHardReset(pState);
2452#endif
2453 }
2454 else
2455 {
2456 if ( (value & CTRL_SLU)
2457 && pState->fCableConnected
2458 && !(STATUS & STATUS_LU))
2459 {
2460 /* The driver indicates that we should bring up the link */
2461 /* Do so in 5 seconds (by default). */
2462 e1kBringLinkUpDelayed(pState);
2463 /*
2464 * Change the status (but not PHY status) anyway as Windows expects
2465 * it for 82543GC.
2466 */
2467 STATUS |= STATUS_LU;
2468 }
2469 if (value & CTRL_VME)
2470 {
2471 E1kLog(("%s VLAN Mode Enabled\n", INSTANCE(pState)));
2472 }
2473 E1kLog(("%s e1kRegWriteCTRL: mdio dir=%s mdc dir=%s mdc=%s mdio=%d\n",
2474 INSTANCE(pState), (value & CTRL_MDIO_DIR)?"OUT":"IN ",
2475 (value & CTRL_MDC_DIR)?"OUT":"IN ", (value & CTRL_MDC)?"HIGH":"LOW ", !!(value & CTRL_MDIO)));
2476 if (value & CTRL_MDC)
2477 {
2478 if (value & CTRL_MDIO_DIR)
2479 {
2480 E1kLog(("%s e1kRegWriteCTRL: Phy::writeMDIO(%d)\n", INSTANCE(pState), !!(value & CTRL_MDIO)));
2481 /* MDIO direction pin is set to output and MDC is high, write MDIO pin value to PHY */
2482 Phy::writeMDIO(&pState->phy, !!(value & CTRL_MDIO));
2483 }
2484 else
2485 {
2486 if (Phy::readMDIO(&pState->phy))
2487 value |= CTRL_MDIO;
2488 else
2489 value &= ~CTRL_MDIO;
2490 E1kLog(("%s e1kRegWriteCTRL: Phy::readMDIO(%d)\n",
2491 INSTANCE(pState), !!(value & CTRL_MDIO)));
2492 }
2493 }
2494 rc = e1kRegWriteDefault(pState, offset, index, value);
2495 }
2496
2497 return rc;
2498}
2499
2500/**
2501 * Write handler for EEPROM/Flash Control/Data register.
2502 *
2503 * Handles EEPROM access requests; forwards writes to EEPROM device if access has been granted.
2504 *
2505 * @param pState The device state structure.
2506 * @param offset Register offset in memory-mapped frame.
2507 * @param index Register index in register array.
2508 * @param value The value to store.
2509 * @param mask Used to implement partial writes (8 and 16-bit).
2510 * @thread EMT
2511 */
2512static int e1kRegWriteEECD(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2513{
2514#ifdef IN_RING3
2515 /* So far we are concerned with lower byte only */
2516 if ((EECD & EECD_EE_GNT) || pState->eChip == E1K_CHIP_82543GC)
2517 {
2518 /* Access to EEPROM granted -- forward 4-wire bits to EEPROM device */
2519 /* Note: 82543GC does not need to request EEPROM access */
2520 STAM_PROFILE_ADV_START(&pState->StatEEPROMWrite, a);
2521 pState->eeprom.write(value & EECD_EE_WIRES);
2522 STAM_PROFILE_ADV_STOP(&pState->StatEEPROMWrite, a);
2523 }
2524 if (value & EECD_EE_REQ)
2525 EECD |= EECD_EE_REQ|EECD_EE_GNT;
2526 else
2527 EECD &= ~EECD_EE_GNT;
2528 //e1kRegWriteDefault(pState, offset, index, value );
2529
2530 return VINF_SUCCESS;
2531#else /* !IN_RING3 */
2532 return VINF_IOM_R3_MMIO_WRITE;
2533#endif /* !IN_RING3 */
2534}
2535
2536/**
2537 * Read handler for EEPROM/Flash Control/Data register.
2538 *
2539 * Lower 4 bits come from EEPROM device if EEPROM access has been granted.
2540 *
2541 * @returns VBox status code.
2542 *
2543 * @param pState The device state structure.
2544 * @param offset Register offset in memory-mapped frame.
2545 * @param index Register index in register array.
2546 * @param mask Used to implement partial reads (8 and 16-bit).
2547 * @thread EMT
2548 */
2549static int e1kRegReadEECD(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2550{
2551#ifdef IN_RING3
2552 uint32_t value;
2553 int rc = e1kRegReadDefault(pState, offset, index, &value);
2554 if (RT_SUCCESS(rc))
2555 {
2556 if ((value & EECD_EE_GNT) || pState->eChip == E1K_CHIP_82543GC)
2557 {
2558 /* Note: 82543GC does not need to request EEPROM access */
2559 /* Access to EEPROM granted -- get 4-wire bits from the EEPROM device */
2560 STAM_PROFILE_ADV_START(&pState->StatEEPROMRead, a);
2561 value |= pState->eeprom.read();
2562 STAM_PROFILE_ADV_STOP(&pState->StatEEPROMRead, a);
2563 }
2564 *pu32Value = value;
2565 }
2566
2567 return rc;
2568#else /* !IN_RING3 */
2569 return VINF_IOM_R3_MMIO_READ;
2570#endif /* !IN_RING3 */
2571}
2572
2573/**
2574 * Write handler for EEPROM Read register.
2575 *
2576 * Handles EEPROM word access requests, reads EEPROM and stores the result
2577 * into DATA field.
2578 *
2579 * @param pState The device state structure.
2580 * @param offset Register offset in memory-mapped frame.
2581 * @param index Register index in register array.
2582 * @param value The value to store.
2583 * @param mask Used to implement partial writes (8 and 16-bit).
2584 * @thread EMT
2585 */
2586static int e1kRegWriteEERD(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2587{
2588#ifdef IN_RING3
2589 /* Make use of 'writable' and 'readable' masks. */
2590 e1kRegWriteDefault(pState, offset, index, value);
2591 /* DONE and DATA are set only if read was triggered by START. */
2592 if (value & EERD_START)
2593 {
2594 uint16_t tmp;
2595 STAM_PROFILE_ADV_START(&pState->StatEEPROMRead, a);
2596 if (pState->eeprom.readWord(GET_BITS_V(value, EERD, ADDR), &tmp))
2597 SET_BITS(EERD, DATA, tmp);
2598 EERD |= EERD_DONE;
2599 STAM_PROFILE_ADV_STOP(&pState->StatEEPROMRead, a);
2600 }
2601
2602 return VINF_SUCCESS;
2603#else /* !IN_RING3 */
2604 return VINF_IOM_R3_MMIO_WRITE;
2605#endif /* !IN_RING3 */
2606}
2607
2608
2609/**
2610 * Write handler for MDI Control register.
2611 *
2612 * Handles PHY read/write requests; forwards requests to internal PHY device.
2613 *
2614 * @param pState The device state structure.
2615 * @param offset Register offset in memory-mapped frame.
2616 * @param index Register index in register array.
2617 * @param value The value to store.
2618 * @param mask Used to implement partial writes (8 and 16-bit).
2619 * @thread EMT
2620 */
2621static int e1kRegWriteMDIC(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2622{
2623 if (value & MDIC_INT_EN)
2624 {
2625 E1kLog(("%s ERROR! Interrupt at the end of an MDI cycle is not supported yet.\n",
2626 INSTANCE(pState)));
2627 }
2628 else if (value & MDIC_READY)
2629 {
2630 E1kLog(("%s ERROR! Ready bit is not reset by software during write operation.\n",
2631 INSTANCE(pState)));
2632 }
2633 else if (GET_BITS_V(value, MDIC, PHY) != 1)
2634 {
2635 E1kLog(("%s ERROR! Access to invalid PHY detected, phy=%d.\n",
2636 INSTANCE(pState), GET_BITS_V(value, MDIC, PHY)));
2637 }
2638 else
2639 {
2640 /* Store the value */
2641 e1kRegWriteDefault(pState, offset, index, value);
2642 STAM_COUNTER_INC(&pState->StatPHYAccesses);
2643 /* Forward op to PHY */
2644 if (value & MDIC_OP_READ)
2645 SET_BITS(MDIC, DATA, Phy::readRegister(&pState->phy, GET_BITS_V(value, MDIC, REG)));
2646 else
2647 Phy::writeRegister(&pState->phy, GET_BITS_V(value, MDIC, REG), value & MDIC_DATA_MASK);
2648 /* Let software know that we are done */
2649 MDIC |= MDIC_READY;
2650 }
2651
2652 return VINF_SUCCESS;
2653}
2654
2655/**
2656 * Write handler for Interrupt Cause Read register.
2657 *
2658 * Bits corresponding to 1s in 'value' will be cleared in ICR register.
2659 *
2660 * @param pState The device state structure.
2661 * @param offset Register offset in memory-mapped frame.
2662 * @param index Register index in register array.
2663 * @param value The value to store.
2664 * @param mask Used to implement partial writes (8 and 16-bit).
2665 * @thread EMT
2666 */
2667static int e1kRegWriteICR(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2668{
2669 ICR &= ~value;
2670
2671 return VINF_SUCCESS;
2672}
2673
2674/**
2675 * Read handler for Interrupt Cause Read register.
2676 *
2677 * Reading this register acknowledges all interrupts.
2678 *
2679 * @returns VBox status code.
2680 *
2681 * @param pState The device state structure.
2682 * @param offset Register offset in memory-mapped frame.
2683 * @param index Register index in register array.
2684 * @param mask Not used.
2685 * @thread EMT
2686 */
2687static int e1kRegReadICR(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
2688{
2689 int rc = e1kCsEnter(pState, VINF_IOM_R3_MMIO_READ);
2690 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2691 return rc;
2692
2693 uint32_t value = 0;
2694 rc = e1kRegReadDefault(pState, offset, index, &value);
2695 if (RT_SUCCESS(rc))
2696 {
2697 if (value)
2698 {
2699 /*
2700 * Not clearing ICR causes QNX to hang as it reads ICR in a loop
2701 * with disabled interrupts.
2702 */
2703 //if (IMS)
2704 if (1)
2705 {
2706 /*
2707 * Interrupts were enabled -- we are supposedly at the very
2708 * beginning of the interrupt handler
2709 */
2710 E1kLogRel(("E1000: irq lowered, icr=0x%x\n", ICR));
2711 E1kLog(("%s e1kRegReadICR: Lowered IRQ (%08x)\n", INSTANCE(pState), ICR));
2712 /* Clear all pending interrupts */
2713 ICR = 0;
2714 pState->fIntRaised = false;
2715 /* Lower(0) INTA(0) */
2716 PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 0);
2717
2718 pState->u64AckedAt = TMTimerGet(pState->CTX_SUFF(pIntTimer));
2719 if (pState->fIntMaskUsed)
2720 pState->fDelayInts = true;
2721 }
2722 else
2723 {
2724 /*
2725 * Interrupts are disabled -- in Windows guests the ICR read is done
2726 * just before re-enabling interrupts.
2727 */
2728 E1kLog(("%s e1kRegReadICR: Suppressing auto-clear due to disabled interrupts (%08x)\n", INSTANCE(pState), ICR));
2729 }
2730 }
2731 *pu32Value = value;
2732 }
2733 e1kCsLeave(pState);
2734
2735 return rc;
2736}
2737
2738/**
2739 * Write handler for Interrupt Cause Set register.
2740 *
2741 * Bits corresponding to 1s in 'value' will be set in ICR register.
2742 *
2743 * @param pState The device state structure.
2744 * @param offset Register offset in memory-mapped frame.
2745 * @param index Register index in register array.
2746 * @param value The value to store.
2747 * @param mask Used to implement partial writes (8 and 16-bit).
2748 * @thread EMT
2749 */
2750static int e1kRegWriteICS(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2751{
2752 E1K_INC_ISTAT_CNT(pState->uStatIntICS);
2753 return e1kRaiseInterrupt(pState, VINF_IOM_R3_MMIO_WRITE, value & s_e1kRegMap[ICS_IDX].writable);
2754}
2755
2756/**
2757 * Write handler for Interrupt Mask Set register.
2758 *
2759 * Will trigger pending interrupts.
2760 *
2761 * @param pState The device state structure.
2762 * @param offset Register offset in memory-mapped frame.
2763 * @param index Register index in register array.
2764 * @param value The value to store.
2765 * @param mask Used to implement partial writes (8 and 16-bit).
2766 * @thread EMT
2767 */
2768static int e1kRegWriteIMS(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2769{
2770 IMS |= value;
2771 E1kLogRel(("E1000: irq enabled, RDH=%x RDT=%x TDH=%x TDT=%x\n", RDH, RDT, TDH, TDT));
2772 E1kLog(("%s e1kRegWriteIMS: IRQ enabled\n", INSTANCE(pState)));
2773 /* Mask changes, we need to raise pending interrupts. */
2774 if ((ICR & IMS) && !pState->fLocked)
2775 {
2776 E1kLog2(("%s e1kRegWriteIMS: IRQ pending (%08x), arming late int timer...\n",
2777 INSTANCE(pState), ICR));
2778 /* Raising an interrupt immediately causes win7 to hang upon NIC reconfiguration, see @bugref{5023}. */
2779 TMTimerSet(pState->CTX_SUFF(pIntTimer), TMTimerFromNano(pState->CTX_SUFF(pIntTimer), ITR * 256) +
2780 TMTimerGet(pState->CTX_SUFF(pIntTimer)));
2781 }
2782
2783 return VINF_SUCCESS;
2784}
2785
2786/**
2787 * Write handler for Interrupt Mask Clear register.
2788 *
2789 * Bits corresponding to 1s in 'value' will be cleared in IMS register.
2790 *
2791 * @param pState The device state structure.
2792 * @param offset Register offset in memory-mapped frame.
2793 * @param index Register index in register array.
2794 * @param value The value to store.
2795 * @param mask Used to implement partial writes (8 and 16-bit).
2796 * @thread EMT
2797 */
2798static int e1kRegWriteIMC(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2799{
2800 int rc = e1kCsEnter(pState, VINF_IOM_R3_MMIO_WRITE);
2801 if (RT_UNLIKELY(rc != VINF_SUCCESS))
2802 return rc;
2803 if (pState->fIntRaised)
2804 {
2805 /*
2806 * Technically we should reset fIntRaised in the ICR read handler, but that would cause
2807 * Windows to freeze since it may receive an interrupt while still at the very beginning
2808 * of its interrupt handler.
2809 */
2810 E1K_INC_ISTAT_CNT(pState->uStatIntLower);
2811 STAM_COUNTER_INC(&pState->StatIntsPrevented);
2812 E1kLogRel(("E1000: irq lowered (IMC), icr=0x%x\n", ICR));
2813 /* Lower(0) INTA(0) */
2814 PDMDevHlpPCISetIrq(pState->CTX_SUFF(pDevIns), 0, 0);
2815 pState->fIntRaised = false;
2816 E1kLog(("%s e1kRegWriteIMC: Lowered IRQ: ICR=%08x\n", INSTANCE(pState), ICR));
2817 }
2818 IMS &= ~value;
2819 E1kLog(("%s e1kRegWriteIMC: IRQ disabled\n", INSTANCE(pState)));
2820 e1kCsLeave(pState);
2821
2822 return VINF_SUCCESS;
2823}
2824
2825/**
2826 * Write handler for Receive Control register.
2827 *
2828 * @param pState The device state structure.
2829 * @param offset Register offset in memory-mapped frame.
2830 * @param index Register index in register array.
2831 * @param value The value to store.
2832 * @param mask Used to implement partial writes (8 and 16-bit).
2833 * @thread EMT
2834 */
2835static int e1kRegWriteRCTL(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2836{
2837 /* Update promiscuous mode */
2838 bool fBecomePromiscous = !!(value & (RCTL_UPE | RCTL_MPE));
2839 if (fBecomePromiscous != !!( RCTL & (RCTL_UPE | RCTL_MPE)))
2840 {
2841 /* Promiscuity has changed, pass the knowledge on. */
2842#ifndef IN_RING3
2843 return VINF_IOM_R3_IOPORT_WRITE;
2844#else
2845 if (pState->pDrvR3)
2846 pState->pDrvR3->pfnSetPromiscuousMode(pState->pDrvR3, fBecomePromiscous);
2847#endif
2848 }
2849
2850 /* Adjust receive buffer size */
2851 unsigned cbRxBuf = 2048 >> GET_BITS_V(value, RCTL, BSIZE);
2852 if (value & RCTL_BSEX)
2853 cbRxBuf *= 16;
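    /* E.g. (illustrative only) BSIZE = 01b gives 2048 >> 1 = 1024 bytes, and
     * the same encoding with BSEX set gives 1024 * 16 = 16384 bytes. */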
2854 if (cbRxBuf != pState->u16RxBSize)
2855 E1kLog2(("%s e1kRegWriteRCTL: Setting receive buffer size to %d (old %d)\n",
2856 INSTANCE(pState), cbRxBuf, pState->u16RxBSize));
2857 pState->u16RxBSize = cbRxBuf;
2858
2859 /* Update the register */
2860 e1kRegWriteDefault(pState, offset, index, value);
2861
2862 return VINF_SUCCESS;
2863}
2864
2865/**
2866 * Write handler for Packet Buffer Allocation register.
2867 *
2868 * TXA = 64 - RXA.
2869 *
2870 * @param pState The device state structure.
2871 * @param offset Register offset in memory-mapped frame.
2872 * @param index Register index in register array.
2873 * @param value The value to store.
2874 * @param mask Used to implement partial writes (8 and 16-bit).
2875 * @thread EMT
2876 */
2877static int e1kRegWritePBA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2878{
2879 e1kRegWriteDefault(pState, offset, index, value);
2880 PBA_st->txa = 64 - PBA_st->rxa;
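    /* For instance (illustrative only): a guest writing RXA = 48 ends up with
     * TXA forced to 64 - 48 = 16, both in KB of on-chip packet buffer. */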
2881
2882 return VINF_SUCCESS;
2883}
2884
2885/**
2886 * Write handler for Receive Descriptor Tail register.
2887 *
2888 * @remarks A write to RDT forces a switch to HC and signals
2889 * e1kNetworkDown_WaitReceiveAvail().
2890 *
2891 * @returns VBox status code.
2892 *
2893 * @param pState The device state structure.
2894 * @param offset Register offset in memory-mapped frame.
2895 * @param index Register index in register array.
2896 * @param value The value to store.
2897 * @param mask Used to implement partial writes (8 and 16-bit).
2898 * @thread EMT
2899 */
2900static int e1kRegWriteRDT(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2901{
2902#ifndef IN_RING3
2903 /* XXX */
2904// return VINF_IOM_R3_MMIO_WRITE;
2905#endif
2906 int rc = e1kCsRxEnter(pState, VINF_IOM_R3_MMIO_WRITE);
2907 if (RT_LIKELY(rc == VINF_SUCCESS))
2908 {
2909 E1kLog(("%s e1kRegWriteRDT\n", INSTANCE(pState)));
2910 rc = e1kRegWriteDefault(pState, offset, index, value);
2911#ifdef E1K_WITH_RXD_CACHE
2912 /*
2913 * We need to fetch descriptors now as RDT may go full circle
2914 * before we attempt to store a received packet. For example,
2915 * Intel's DOS drivers use 2 (!) RX descriptors with the total ring
2916 * size being only 8 descriptors! Note that we fetch descriptors
2917 * only when the cache is empty to reduce the number of memory reads
2918 * in case of frequent RDT writes. Don't fetch anything when the
2919 * receiver is disabled either, as RDH, RDT and RDLEN can be in some
2920 * messed-up state then.
2921 * Note that even though the cache may seem empty, meaning that there
2922 * are no more available descriptors in it, it may still be in use by
2923 * the RX thread, which has not yet written the last descriptor back
2924 * but has temporarily released the RX lock in order to write the
2925 * packet body to the descriptor's buffer. At this point we are still
2926 * going to do the prefetch, but it won't actually fetch anything if
2927 * there are no unused slots in our "empty" cache (nRxDFetched ==
2928 * E1K_RXD_CACHE_SIZE). We must not reset the cache here even if it
2929 * appears empty; it will be reset at a later point in e1kRxDGet().
2930 */
2931 if (e1kRxDIsCacheEmpty(pState) && (RCTL & RCTL_EN))
2932 e1kRxDPrefetch(pState);
2933#endif /* E1K_WITH_RXD_CACHE */
2934 e1kCsRxLeave(pState);
2935 if (RT_SUCCESS(rc))
2936 {
2937/** @todo bird: Use SUPSem* for this so we can signal it in ring-0 as well
2938 * without requiring any context switches. We should also check the
2939 * wait condition before bothering to queue the item as we're currently
2940 * queuing thousands of items per second here in a normal transmit
2941 * scenario. Expect performance changes when fixing this! */
2942#ifdef IN_RING3
2943 /* Signal that we have more receive descriptors available. */
2944 e1kWakeupReceive(pState->CTX_SUFF(pDevIns));
2945#else
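            /* Not in ring-3: we cannot call e1kWakeupReceive() directly from this context,
             * so queue an item for the ring-3 queue consumer to perform the wakeup. */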
2946 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pState->CTX_SUFF(pCanRxQueue));
2947 if (pItem)
2948 PDMQueueInsert(pState->CTX_SUFF(pCanRxQueue), pItem);
2949#endif
2950 }
2951 }
2952 return rc;
2953}
2954
2955/**
2956 * Write handler for Receive Delay Timer register.
2957 *
2958 * @param pState The device state structure.
2959 * @param offset Register offset in memory-mapped frame.
2960 * @param index Register index in register array.
2961 * @param value The value to store.
2962 * @param mask Used to implement partial writes (8 and 16-bit).
2963 * @thread EMT
2964 */
2965static int e1kRegWriteRDTR(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
2966{
2967 e1kRegWriteDefault(pState, offset, index, value);
2968 if (value & RDTR_FPD)
2969 {
2970 /* Flush requested, cancel both timers and raise interrupt */
2971#ifdef E1K_USE_RX_TIMERS
2972 e1kCancelTimer(pState, pState->CTX_SUFF(pRIDTimer));
2973 e1kCancelTimer(pState, pState->CTX_SUFF(pRADTimer));
2974#endif
2975 E1K_INC_ISTAT_CNT(pState->uStatIntRDTR);
2976 return e1kRaiseInterrupt(pState, VINF_IOM_R3_MMIO_WRITE, ICR_RXT0);
2977 }
2978
2979 return VINF_SUCCESS;
2980}
2981
2982DECLINLINE(uint32_t) e1kGetTxLen(E1KSTATE* pState)
2983{
2984    /*
2985 * Make sure TDT won't change during computation. EMT may modify TDT at
2986 * any moment.
2987 */
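    /* Number of descriptors the guest has made available, taking ring wrap-around into
     * account. E.g. with an 8-descriptor ring, TDH=6 and TDT=2 the formula below yields
     * 8 + 2 - 6 = 4 pending descriptors. */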
2988 uint32_t tdt = TDT;
2989 return (TDH>tdt ? TDLEN/sizeof(E1KTXDESC) : 0) + tdt - TDH;
2990}
2991
2992#ifdef IN_RING3
2993#ifdef E1K_USE_TX_TIMERS
2994
2995/**
2996 * Transmit Interrupt Delay Timer handler.
2997 *
2998 * @remarks We only get here when the timer expires.
2999 *
3000 * @param pDevIns Pointer to device instance structure.
3001 * @param pTimer Pointer to the timer.
3002 * @param pvUser NULL.
3003 * @thread EMT
3004 */
3005static DECLCALLBACK(void) e1kTxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3006{
3007 E1KSTATE *pState = (E1KSTATE *)pvUser;
3008
3009 E1K_INC_ISTAT_CNT(pState->uStatTID);
3010 /* Cancel absolute delay timer as we have already got attention */
3011#ifndef E1K_NO_TAD
3012 e1kCancelTimer(pState, pState->CTX_SUFF(pTADTimer));
3013#endif /* E1K_NO_TAD */
3014 e1kRaiseInterrupt(pState, ICR_TXDW);
3015}
3016
3017/**
3018 * Transmit Absolute Delay Timer handler.
3019 *
3020 * @remarks We only get here when the timer expires.
3021 *
3022 * @param pDevIns Pointer to device instance structure.
3023 * @param pTimer Pointer to the timer.
3024 * @param pvUser NULL.
3025 * @thread EMT
3026 */
3027static DECLCALLBACK(void) e1kTxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3028{
3029 E1KSTATE *pState = (E1KSTATE *)pvUser;
3030
3031 E1K_INC_ISTAT_CNT(pState->uStatTAD);
3032 /* Cancel interrupt delay timer as we have already got attention */
3033 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
3034 e1kRaiseInterrupt(pState, ICR_TXDW);
3035}
3036
3037#endif /* E1K_USE_TX_TIMERS */
3038#ifdef E1K_USE_RX_TIMERS
3039
3040/**
3041 * Receive Interrupt Delay Timer handler.
3042 *
3043 * @remarks We only get here when the timer expires.
3044 *
3045 * @param pDevIns Pointer to device instance structure.
3046 * @param pTimer Pointer to the timer.
3047 * @param pvUser NULL.
3048 * @thread EMT
3049 */
3050static DECLCALLBACK(void) e1kRxIntDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3051{
3052 E1KSTATE *pState = (E1KSTATE *)pvUser;
3053
3054 E1K_INC_ISTAT_CNT(pState->uStatRID);
3055 /* Cancel absolute delay timer as we have already got attention */
3056 e1kCancelTimer(pState, pState->CTX_SUFF(pRADTimer));
3057 e1kRaiseInterrupt(pState, ICR_RXT0);
3058}
3059
3060/**
3061 * Receive Absolute Delay Timer handler.
3062 *
3063 * @remarks We only get here when the timer expires.
3064 *
3065 * @param pDevIns Pointer to device instance structure.
3066 * @param pTimer Pointer to the timer.
3067 * @param pvUser NULL.
3068 * @thread EMT
3069 */
3070static DECLCALLBACK(void) e1kRxAbsDelayTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3071{
3072 E1KSTATE *pState = (E1KSTATE *)pvUser;
3073
3074 E1K_INC_ISTAT_CNT(pState->uStatRAD);
3075 /* Cancel interrupt delay timer as we have already got attention */
3076 e1kCancelTimer(pState, pState->CTX_SUFF(pRIDTimer));
3077 e1kRaiseInterrupt(pState, ICR_RXT0);
3078}
3079
3080#endif /* E1K_USE_RX_TIMERS */
3081
3082/**
3083 * Late Interrupt Timer handler.
3084 *
3085 * @param pDevIns Pointer to device instance structure.
3086 * @param pTimer Pointer to the timer.
3087 * @param pvUser NULL.
3088 * @thread EMT
3089 */
3090static DECLCALLBACK(void) e1kLateIntTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3091{
3092 E1KSTATE *pState = (E1KSTATE *)pvUser;
3093
3094 STAM_PROFILE_ADV_START(&pState->StatLateIntTimer, a);
3095 STAM_COUNTER_INC(&pState->StatLateInts);
3096 E1K_INC_ISTAT_CNT(pState->uStatIntLate);
3097#if 0
3098 if (pState->iStatIntLost > -100)
3099 pState->iStatIntLost--;
3100#endif
3101 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, 0);
3102 STAM_PROFILE_ADV_STOP(&pState->StatLateIntTimer, a);
3103}
3104
3105/**
3106 * Link Up Timer handler.
3107 *
3108 * @param pDevIns Pointer to device instance structure.
3109 * @param pTimer Pointer to the timer.
3110 * @param pvUser NULL.
3111 * @thread EMT
3112 */
3113static DECLCALLBACK(void) e1kLinkUpTimer(PPDMDEVINS pDevIns, PTMTIMER pTimer, void *pvUser)
3114{
3115 E1KSTATE *pState = (E1KSTATE *)pvUser;
3116
3117 /*
3118     * This can happen if the link status was set to down while the link-up timer was
3119     * already armed (shortly after e1kLoadDone(), or when the cable was disconnected
3120     * and then reconnected very quickly).
3121 */
3122 if (!pState->fCableConnected)
3123 return;
3124
3125 E1kLog(("%s e1kLinkUpTimer: Link is up\n", INSTANCE(pState)));
3126 STATUS |= STATUS_LU;
3127 Phy::setLinkStatus(&pState->phy, true);
3128 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
3129}
3130
3131#endif /* IN_RING3 */
3132
3133/**
3134 * Sets up the GSO context according to the TSE new context descriptor.
3135 *
3136 * @param pGso The GSO context to setup.
3137 * @param pCtx The context descriptor.
3138 */
3139DECLINLINE(void) e1kSetupGsoCtx(PPDMNETWORKGSO pGso, E1KTXCTX const *pCtx)
3140{
3141 pGso->u8Type = PDMNETWORKGSOTYPE_INVALID;
3142
3143 /*
3144 * See if the context descriptor describes something that could be TCP or
3145 * UDP over IPv[46].
3146 */
3147 /* Check the header ordering and spacing: 1. Ethernet, 2. IP, 3. TCP/UDP. */
3148 if (RT_UNLIKELY( pCtx->ip.u8CSS < sizeof(RTNETETHERHDR) ))
3149 {
3150 E1kLog(("e1kSetupGsoCtx: IPCSS=%#x\n", pCtx->ip.u8CSS));
3151 return;
3152 }
3153 if (RT_UNLIKELY( pCtx->tu.u8CSS < (size_t)pCtx->ip.u8CSS + (pCtx->dw2.fIP ? RTNETIPV4_MIN_LEN : RTNETIPV6_MIN_LEN) ))
3154 {
3155 E1kLog(("e1kSetupGsoCtx: TUCSS=%#x\n", pCtx->tu.u8CSS));
3156 return;
3157 }
3158 if (RT_UNLIKELY( pCtx->dw2.fTCP
3159 ? pCtx->dw3.u8HDRLEN < (size_t)pCtx->tu.u8CSS + RTNETTCP_MIN_LEN
3160 : pCtx->dw3.u8HDRLEN != (size_t)pCtx->tu.u8CSS + RTNETUDP_MIN_LEN ))
3161 {
3162 E1kLog(("e1kSetupGsoCtx: HDRLEN=%#x TCP=%d\n", pCtx->dw3.u8HDRLEN, pCtx->dw2.fTCP));
3163 return;
3164 }
3165
3166 /* The end of the TCP/UDP checksum should stop at the end of the packet or at least after the headers. */
3167 if (RT_UNLIKELY( pCtx->tu.u16CSE > 0 && pCtx->tu.u16CSE <= pCtx->dw3.u8HDRLEN ))
3168 {
3169 E1kLog(("e1kSetupGsoCtx: TUCSE=%#x HDRLEN=%#x\n", pCtx->tu.u16CSE, pCtx->dw3.u8HDRLEN));
3170 return;
3171 }
3172
3173 /* IPv4 checksum offset. */
3174 if (RT_UNLIKELY( pCtx->dw2.fIP && (size_t)pCtx->ip.u8CSO - pCtx->ip.u8CSS != RT_UOFFSETOF(RTNETIPV4, ip_sum) ))
3175 {
3176 E1kLog(("e1kSetupGsoCtx: IPCSO=%#x IPCSS=%#x\n", pCtx->ip.u8CSO, pCtx->ip.u8CSS));
3177 return;
3178 }
3179
3180 /* TCP/UDP checksum offsets. */
3181 if (RT_UNLIKELY( (size_t)pCtx->tu.u8CSO - pCtx->tu.u8CSS
3182 != ( pCtx->dw2.fTCP
3183 ? RT_UOFFSETOF(RTNETTCP, th_sum)
3184 : RT_UOFFSETOF(RTNETUDP, uh_sum) ) ))
3185 {
3186        E1kLog(("e1kSetupGsoCtx: TUCSO=%#x TUCSS=%#x TCP=%d\n", pCtx->tu.u8CSO, pCtx->tu.u8CSS, pCtx->dw2.fTCP));
3187 return;
3188 }
3189
3190 /*
3191     * Because internal networking uses a 16-bit size field for the GSO context
3192     * plus frame, we have to make sure we don't exceed it.
3193 */
3194 if (RT_UNLIKELY( pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN > VBOX_MAX_GSO_SIZE ))
3195 {
3196 E1kLog(("e1kSetupGsoCtx: HDRLEN(=%#x) + PAYLEN(=%#x) = %#x, max is %#x\n",
3197 pCtx->dw3.u8HDRLEN, pCtx->dw2.u20PAYLEN, pCtx->dw3.u8HDRLEN + pCtx->dw2.u20PAYLEN, VBOX_MAX_GSO_SIZE));
3198 return;
3199 }
3200
3201 /*
3202 * We're good for now - we'll do more checks when seeing the data.
3203 * So, figure the type of offloading and setup the context.
3204 */
3205 if (pCtx->dw2.fIP)
3206 {
3207 if (pCtx->dw2.fTCP)
3208 {
3209 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_TCP;
3210 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN;
3211 }
3212 else
3213 {
3214 pGso->u8Type = PDMNETWORKGSOTYPE_IPV4_UDP;
3215 pGso->cbHdrsSeg = pCtx->tu.u8CSS; /* IP header only */
3216 }
3217 /** @todo Detect IPv4-IPv6 tunneling (need test setup since linux doesn't do
3218 * this yet it seems)... */
3219 }
3220 else
3221 {
3222 pGso->cbHdrsSeg = pCtx->dw3.u8HDRLEN; /* @todo IPv6 UFO */
3223 if (pCtx->dw2.fTCP)
3224 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_TCP;
3225 else
3226 pGso->u8Type = PDMNETWORKGSOTYPE_IPV6_UDP;
3227 }
3228 pGso->offHdr1 = pCtx->ip.u8CSS;
3229 pGso->offHdr2 = pCtx->tu.u8CSS;
3230 pGso->cbHdrsTotal = pCtx->dw3.u8HDRLEN;
3231 pGso->cbMaxSeg = pCtx->dw3.u16MSS;
3232 Assert(PDMNetGsoIsValid(pGso, sizeof(*pGso), pGso->cbMaxSeg * 5));
3233 E1kLog2(("e1kSetupGsoCtx: mss=%#x hdr=%#x hdrseg=%#x hdr1=%#x hdr2=%#x %s\n",
3234 pGso->cbMaxSeg, pGso->cbHdrsTotal, pGso->cbHdrsSeg, pGso->offHdr1, pGso->offHdr2, PDMNetGsoTypeName((PDMNETWORKGSOTYPE)pGso->u8Type) ));
3235}
3236
3237/**
3238 * Checks if we can use GSO processing for the current TSE frame.
3239 *
3240 * @param pGso The GSO context.
3241 * @param pData The first data descriptor of the frame.
3242 * @param pCtx The TSO context descriptor.
3243 */
3244DECLINLINE(bool) e1kCanDoGso(PCPDMNETWORKGSO pGso, E1KTXDAT const *pData, E1KTXCTX const *pCtx)
3245{
3246 if (!pData->cmd.fTSE)
3247 {
3248 E1kLog2(("e1kCanDoGso: !TSE\n"));
3249 return false;
3250 }
3251 if (pData->cmd.fVLE) /** @todo VLAN tagging. */
3252 {
3253 E1kLog(("e1kCanDoGso: VLE\n"));
3254 return false;
3255 }
3256
3257 switch ((PDMNETWORKGSOTYPE)pGso->u8Type)
3258 {
3259 case PDMNETWORKGSOTYPE_IPV4_TCP:
3260 case PDMNETWORKGSOTYPE_IPV4_UDP:
3261 if (!pData->dw3.fIXSM)
3262 {
3263 E1kLog(("e1kCanDoGso: !IXSM (IPv4)\n"));
3264 return false;
3265 }
3266 if (!pData->dw3.fTXSM)
3267 {
3268 E1kLog(("e1kCanDoGso: !TXSM (IPv4)\n"));
3269 return false;
3270 }
3271 /** @todo what more check should we perform here? Ethernet frame type? */
3272 E1kLog2(("e1kCanDoGso: OK, IPv4\n"));
3273 return true;
3274
3275 case PDMNETWORKGSOTYPE_IPV6_TCP:
3276 case PDMNETWORKGSOTYPE_IPV6_UDP:
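            /* IPv6 has no header checksum, so a request to insert one (IXSM with a
             * checksum offset programmed) indicates a context we cannot handle. */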
3277 if (pData->dw3.fIXSM && pCtx->ip.u8CSO)
3278 {
3279 E1kLog(("e1kCanDoGso: IXSM (IPv6)\n"));
3280 return false;
3281 }
3282 if (!pData->dw3.fTXSM)
3283 {
3284                E1kLog(("e1kCanDoGso: !TXSM (IPv6)\n"));
3285 return false;
3286 }
3287 /** @todo what more check should we perform here? Ethernet frame type? */
3288            E1kLog2(("e1kCanDoGso: OK, IPv6\n"));
3289 return true;
3290
3291 default:
3292 Assert(pGso->u8Type == PDMNETWORKGSOTYPE_INVALID);
3293 E1kLog2(("e1kCanDoGso: e1kSetupGsoCtx failed\n"));
3294 return false;
3295 }
3296}
3297
3298/**
3299 * Frees the current xmit buffer.
3300 *
3301 * @param pState The device state structure.
3302 */
3303static void e1kXmitFreeBuf(E1KSTATE *pState)
3304{
3305 PPDMSCATTERGATHER pSg = pState->CTX_SUFF(pTxSg);
3306 if (pSg)
3307 {
3308 pState->CTX_SUFF(pTxSg) = NULL;
3309
3310 if (pSg->pvAllocator != pState)
3311 {
3312 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3313 if (pDrv)
3314 pDrv->pfnFreeBuf(pDrv, pSg);
3315 }
3316 else
3317 {
3318 /* loopback */
3319 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3320 Assert(pSg->fFlags == (PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3));
3321 pSg->fFlags = 0;
3322 pSg->pvAllocator = NULL;
3323 }
3324 }
3325}
3326
3327#ifndef E1K_WITH_TXD_CACHE
3328/**
3329 * Allocates an xmit buffer.
3330 *
3331 * @returns See PDMINETWORKUP::pfnAllocBuf.
3332 * @param pState The device state structure.
3333 * @param cbMin The minimum frame size.
3334 * @param fExactSize Whether cbMin is exact or if we have to max it
3335 * out to the max MTU size.
3336 * @param fGso Whether this is a GSO frame or not.
3337 */
3338DECLINLINE(int) e1kXmitAllocBuf(E1KSTATE *pState, size_t cbMin, bool fExactSize, bool fGso)
3339{
3340 /* Adjust cbMin if necessary. */
3341 if (!fExactSize)
3342 cbMin = RT_MAX(cbMin, E1K_MAX_TX_PKT_SIZE);
3343
3344 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3345 if (RT_UNLIKELY(pState->CTX_SUFF(pTxSg)))
3346 e1kXmitFreeBuf(pState);
3347 Assert(pState->CTX_SUFF(pTxSg) == NULL);
3348
3349 /*
3350 * Allocate the buffer.
3351 */
3352 PPDMSCATTERGATHER pSg;
3353 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3354 {
3355 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3356 if (RT_UNLIKELY(!pDrv))
3357 return VERR_NET_DOWN;
3358 int rc = pDrv->pfnAllocBuf(pDrv, cbMin, fGso ? &pState->GsoCtx : NULL, &pSg);
3359 if (RT_FAILURE(rc))
3360 {
3361 /* Suspend TX as we are out of buffers atm */
3362 STATUS |= STATUS_TXOFF;
3363 return rc;
3364 }
3365 }
3366 else
3367 {
3368 /* Create a loopback using the fallback buffer and preallocated SG. */
3369 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3370 pSg = &pState->uTxFallback.Sg;
3371 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3372 pSg->cbUsed = 0;
3373 pSg->cbAvailable = 0;
3374 pSg->pvAllocator = pState;
3375 pSg->pvUser = NULL; /* No GSO here. */
3376 pSg->cSegs = 1;
3377 pSg->aSegs[0].pvSeg = pState->aTxPacketFallback;
3378 pSg->aSegs[0].cbSeg = sizeof(pState->aTxPacketFallback);
3379 }
3380
3381 pState->CTX_SUFF(pTxSg) = pSg;
3382 return VINF_SUCCESS;
3383}
3384#else /* E1K_WITH_TXD_CACHE */
3385/**
3386 * Allocates an xmit buffer.
3387 *
3388 * @returns See PDMINETWORKUP::pfnAllocBuf.
3389 * @param pState The device state structure.
3390 * @param cbMin The minimum frame size.
3391 * @param fExactSize Whether cbMin is exact or if we have to max it
3392 * out to the max MTU size.
3393 * @param fGso Whether this is a GSO frame or not.
3394 */
3395DECLINLINE(int) e1kXmitAllocBuf(E1KSTATE *pState, bool fGso)
3396{
3397 /* Deal with existing buffer (descriptor screw up, reset, etc). */
3398 if (RT_UNLIKELY(pState->CTX_SUFF(pTxSg)))
3399 e1kXmitFreeBuf(pState);
3400 Assert(pState->CTX_SUFF(pTxSg) == NULL);
3401
3402 /*
3403 * Allocate the buffer.
3404 */
3405 PPDMSCATTERGATHER pSg;
3406 if (RT_LIKELY(GET_BITS(RCTL, LBM) != RCTL_LBM_TCVR))
3407 {
3408 if (pState->cbTxAlloc == 0)
3409 {
3410 /* Zero packet, no need for the buffer */
3411 return VINF_SUCCESS;
3412 }
3413
3414 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3415 if (RT_UNLIKELY(!pDrv))
3416 return VERR_NET_DOWN;
3417 int rc = pDrv->pfnAllocBuf(pDrv, pState->cbTxAlloc, fGso ? &pState->GsoCtx : NULL, &pSg);
3418 if (RT_FAILURE(rc))
3419 {
3420 /* Suspend TX as we are out of buffers atm */
3421 STATUS |= STATUS_TXOFF;
3422 return rc;
3423 }
3424 E1kLog3(("%s Allocated buffer for TX packet: cb=%u %s%s\n",
3425 INSTANCE(pState), pState->cbTxAlloc,
3426 pState->fVTag ? "VLAN " : "",
3427 pState->fGSO ? "GSO " : ""));
3428 pState->cbTxAlloc = 0;
3429 }
3430 else
3431 {
3432 /* Create a loopback using the fallback buffer and preallocated SG. */
3433 AssertCompileMemberSize(E1KSTATE, uTxFallback.Sg, 8 * sizeof(size_t));
3434 pSg = &pState->uTxFallback.Sg;
3435 pSg->fFlags = PDMSCATTERGATHER_FLAGS_MAGIC | PDMSCATTERGATHER_FLAGS_OWNER_3;
3436 pSg->cbUsed = 0;
3437 pSg->cbAvailable = 0;
3438 pSg->pvAllocator = pState;
3439 pSg->pvUser = NULL; /* No GSO here. */
3440 pSg->cSegs = 1;
3441 pSg->aSegs[0].pvSeg = pState->aTxPacketFallback;
3442 pSg->aSegs[0].cbSeg = sizeof(pState->aTxPacketFallback);
3443 }
3444
3445 pState->CTX_SUFF(pTxSg) = pSg;
3446 return VINF_SUCCESS;
3447}
3448#endif /* E1K_WITH_TXD_CACHE */
3449
3450/**
3451 * Checks if it's a GSO buffer or not.
3452 *
3453 * @returns true / false.
3454 * @param pTxSg The scatter / gather buffer.
3455 */
3456DECLINLINE(bool) e1kXmitIsGsoBuf(PDMSCATTERGATHER const *pTxSg)
3457{
3458#if 0
3459 if (!pTxSg)
3460 E1kLog(("e1kXmitIsGsoBuf: pTxSG is NULL\n"));
3461    if (pTxSg && !pTxSg->pvUser)
3462 E1kLog(("e1kXmitIsGsoBuf: pvUser is NULL\n"));
3463#endif
3464 return pTxSg && pTxSg->pvUser /* GSO indicator */;
3465}
3466
3467#ifndef E1K_WITH_TXD_CACHE
3468/**
3469 * Load transmit descriptor from guest memory.
3470 *
3471 * @param pState The device state structure.
3472 * @param pDesc Pointer to descriptor union.
3473 * @param addr Physical address in guest context.
3474 * @thread E1000_TX
3475 */
3476DECLINLINE(void) e1kLoadDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr)
3477{
3478 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3479}
3480#else /* E1K_WITH_TXD_CACHE */
3481/**
3482 * Load transmit descriptors from guest memory.
3483 *
3484 * We need two physical reads in case the tail wrapped around the end of TX
3485 * descriptor ring.
3486 *
3487 * @returns the actual number of descriptors fetched.
3488 * @param pState The device state structure.
3491 * @thread E1000_TX
3492 */
3493DECLINLINE(unsigned) e1kTxDLoadMore(E1KSTATE* pState)
3494{
3495 Assert(pState->iTxDCurrent == 0);
3496 /* We've already loaded pState->nTxDFetched descriptors past TDH. */
3497 unsigned nDescsAvailable = e1kGetTxLen(pState) - pState->nTxDFetched;
3498 unsigned nDescsToFetch = RT_MIN(nDescsAvailable, E1K_TXD_CACHE_SIZE - pState->nTxDFetched);
3499 unsigned nDescsTotal = TDLEN / sizeof(E1KTXDESC);
3500 unsigned nFirstNotLoaded = (TDH + pState->nTxDFetched) % nDescsTotal;
3501 unsigned nDescsInSingleRead = RT_MIN(nDescsToFetch, nDescsTotal - nFirstNotLoaded);
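    /* If the range to fetch wraps past the end of the ring, it is split into two reads:
     * e.g. an 8-descriptor ring with nFirstNotLoaded=6 and 4 descriptors to fetch reads
     * 2 here and the remaining 2 from the start of the ring further down. */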
3502 E1kLog3(("%s e1kTxDLoadMore: nDescsAvailable=%u nDescsToFetch=%u "
3503 "nDescsTotal=%u nFirstNotLoaded=0x%x nDescsInSingleRead=%u\n",
3504 INSTANCE(pState), nDescsAvailable, nDescsToFetch, nDescsTotal,
3505 nFirstNotLoaded, nDescsInSingleRead));
3506 if (nDescsToFetch == 0)
3507 return 0;
3508 E1KTXDESC* pFirstEmptyDesc = &pState->aTxDescriptors[pState->nTxDFetched];
3509 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
3510 ((uint64_t)TDBAH << 32) + TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3511 pFirstEmptyDesc, nDescsInSingleRead * sizeof(E1KTXDESC));
3512 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x(0x%x), TDLEN=%08x, TDH=%08x, TDT=%08x\n",
3513 INSTANCE(pState), nDescsInSingleRead,
3514             TDBAH, TDBAL + nFirstNotLoaded * sizeof(E1KTXDESC),
3515 nFirstNotLoaded, TDLEN, TDH, TDT));
3516 if (nDescsToFetch > nDescsInSingleRead)
3517 {
3518 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
3519 ((uint64_t)TDBAH << 32) + TDBAL,
3520 pFirstEmptyDesc + nDescsInSingleRead,
3521 (nDescsToFetch - nDescsInSingleRead) * sizeof(E1KTXDESC));
3522 E1kLog3(("%s Fetched %u TX descriptors at %08x%08x\n",
3523 INSTANCE(pState), nDescsToFetch - nDescsInSingleRead,
3524 TDBAH, TDBAL));
3525 }
3526 pState->nTxDFetched += nDescsToFetch;
3527 return nDescsToFetch;
3528}
3529
3530/**
3531 * Load transmit descriptors from guest memory only if there are no loaded
3532 * descriptors.
3533 *
3534 * @returns true if there are descriptors in cache.
3535 * @param pState The device state structure.
3538 * @thread E1000_TX
3539 */
3540DECLINLINE(bool) e1kTxDLazyLoad(E1KSTATE* pState)
3541{
3542 if (pState->nTxDFetched == 0)
3543 return e1kTxDLoadMore(pState) != 0;
3544 return true;
3545}
3546#endif /* E1K_WITH_TXD_CACHE */
3547
3548/**
3549 * Write back transmit descriptor to guest memory.
3550 *
3551 * @param pState The device state structure.
3552 * @param pDesc Pointer to descriptor union.
3553 * @param addr Physical address in guest context.
3554 * @thread E1000_TX
3555 */
3556DECLINLINE(void) e1kWriteBackDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr)
3557{
3558    /* Strictly, only the last half of the descriptor needs to be written back, but we write back the whole descriptor for simplicity. */
3559 e1kPrintTDesc(pState, pDesc, "^^^");
3560 PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns), addr, pDesc, sizeof(E1KTXDESC));
3561}
3562
3563/**
3564 * Transmit complete frame.
3565 *
3566 * @remarks We skip the FCS since we're not responsible for sending anything to
3567 * a real ethernet wire.
3568 *
3569 * @param pState The device state structure.
3570 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3571 * @thread E1000_TX
3572 */
3573static void e1kTransmitFrame(E1KSTATE* pState, bool fOnWorkerThread)
3574{
3575 PPDMSCATTERGATHER pSg = pState->CTX_SUFF(pTxSg);
3576 uint32_t cbFrame = pSg ? (uint32_t)pSg->cbUsed : 0;
3577 Assert(!pSg || pSg->cSegs == 1);
3578
3579 if (cbFrame > 70) /* unqualified guess */
3580 pState->led.Asserted.s.fWriting = pState->led.Actual.s.fWriting = 1;
3581
3582 /* Add VLAN tag */
3583 if (cbFrame > 12 && pState->fVTag)
3584 {
3585 E1kLog3(("%s Inserting VLAN tag %08x\n",
3586 INSTANCE(pState), RT_BE2H_U16(VET) | (RT_BE2H_U16(pState->u16VTagTCI) << 16)));
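        /* Shift everything after the two MAC addresses (bytes 0-11) up by 4 bytes and
         * place the 802.1Q tag (VET followed by the TCI) at byte offset 12. */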
3587 memmove((uint8_t*)pSg->aSegs[0].pvSeg + 16, (uint8_t*)pSg->aSegs[0].pvSeg + 12, cbFrame - 12);
3588 *((uint32_t*)pSg->aSegs[0].pvSeg + 3) = RT_BE2H_U16(VET) | (RT_BE2H_U16(pState->u16VTagTCI) << 16);
3589 pSg->cbUsed += 4;
3590 cbFrame += 4;
3591 Assert(pSg->cbUsed == cbFrame);
3592 Assert(pSg->cbUsed <= pSg->cbAvailable);
3593 }
3594/* E1kLog2(("%s < < < Outgoing packet. Dump follows: > > >\n"
3595 "%.*Rhxd\n"
3596 "%s < < < < < < < < < < < < < End of dump > > > > > > > > > > > >\n",
3597 INSTANCE(pState), cbFrame, pSg->aSegs[0].pvSeg, INSTANCE(pState)));*/
3598
3599 /* Update the stats */
3600 E1K_INC_CNT32(TPT);
3601 E1K_ADD_CNT64(TOTL, TOTH, cbFrame);
3602 E1K_INC_CNT32(GPTC);
3603 if (pSg && e1kIsBroadcast(pSg->aSegs[0].pvSeg))
3604 E1K_INC_CNT32(BPTC);
3605 else if (pSg && e1kIsMulticast(pSg->aSegs[0].pvSeg))
3606 E1K_INC_CNT32(MPTC);
3607 /* Update octet transmit counter */
3608 E1K_ADD_CNT64(GOTCL, GOTCH, cbFrame);
3609 if (pState->CTX_SUFF(pDrv))
3610 STAM_REL_COUNTER_ADD(&pState->StatTransmitBytes, cbFrame);
3611 if (cbFrame == 64)
3612 E1K_INC_CNT32(PTC64);
3613 else if (cbFrame < 128)
3614 E1K_INC_CNT32(PTC127);
3615 else if (cbFrame < 256)
3616 E1K_INC_CNT32(PTC255);
3617 else if (cbFrame < 512)
3618 E1K_INC_CNT32(PTC511);
3619 else if (cbFrame < 1024)
3620 E1K_INC_CNT32(PTC1023);
3621 else
3622 E1K_INC_CNT32(PTC1522);
3623
3624 E1K_INC_ISTAT_CNT(pState->uStatTxFrm);
3625
3626 /*
3627 * Dump and send the packet.
3628 */
3629 int rc = VERR_NET_DOWN;
3630 if (pSg && pSg->pvAllocator != pState)
3631 {
3632 e1kPacketDump(pState, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Outgoing");
3633
3634 pState->CTX_SUFF(pTxSg) = NULL;
3635 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
3636 if (pDrv)
3637 {
3638 /* Release critical section to avoid deadlock in CanReceive */
3639 //e1kCsLeave(pState);
3640 STAM_PROFILE_START(&pState->CTX_SUFF_Z(StatTransmitSend), a);
3641 rc = pDrv->pfnSendBuf(pDrv, pSg, fOnWorkerThread);
3642 STAM_PROFILE_STOP(&pState->CTX_SUFF_Z(StatTransmitSend), a);
3643 //e1kCsEnter(pState, RT_SRC_POS);
3644 }
3645 }
3646 else if (pSg)
3647 {
3648 Assert(pSg->aSegs[0].pvSeg == pState->aTxPacketFallback);
3649 e1kPacketDump(pState, (uint8_t const *)pSg->aSegs[0].pvSeg, cbFrame, "--> Loopback");
3650
3651 /** @todo do we actually need to check that we're in loopback mode here? */
3652 if (GET_BITS(RCTL, LBM) == RCTL_LBM_TCVR)
3653 {
3654 E1KRXDST status;
3655 RT_ZERO(status);
3656 status.fPIF = true;
3657 e1kHandleRxPacket(pState, pSg->aSegs[0].pvSeg, cbFrame, status);
3658 rc = VINF_SUCCESS;
3659 }
3660 e1kXmitFreeBuf(pState);
3661 }
3662 else
3663 rc = VERR_NET_DOWN;
3664 if (RT_FAILURE(rc))
3665 {
3666 E1kLogRel(("E1000: ERROR! pfnSend returned %Rrc\n", rc));
3667 /** @todo handle VERR_NET_DOWN and VERR_NET_NO_BUFFER_SPACE. Signal error ? */
3668 }
3669
3670 pState->led.Actual.s.fWriting = 0;
3671}
3672
3673/**
3674 * Compute and write internet checksum (e1kCSum16) at the specified offset.
3675 *
3676 * @param pState The device state structure.
3677 * @param pPkt Pointer to the packet.
3678 * @param u16PktLen Total length of the packet.
3679 * @param cso Offset in packet to write checksum at.
3680 * @param css Offset in packet to start computing
3681 * checksum from.
3682 * @param cse Offset in packet to stop computing
3683 * checksum at.
3684 * @thread E1000_TX
3685 */
3686static void e1kInsertChecksum(E1KSTATE* pState, uint8_t *pPkt, uint16_t u16PktLen, uint8_t cso, uint8_t css, uint16_t cse)
3687{
3688 if (css >= u16PktLen)
3689 {
3690 E1kLog2(("%s css(%X) is greater than packet length-1(%X), checksum is not inserted\n",
3691                 INSTANCE(pState), css, u16PktLen));
3692 return;
3693 }
3694
3695 if (cso >= u16PktLen - 1)
3696 {
3697 E1kLog2(("%s cso(%X) is greater than packet length-2(%X), checksum is not inserted\n",
3698 INSTANCE(pState), cso, u16PktLen));
3699 return;
3700 }
3701
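    /* A CSE of zero means "checksum to the end of the packet". CSE is an inclusive
     * offset, hence the +1 when computing the length passed to e1kCSum16(). */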
3702 if (cse == 0)
3703 cse = u16PktLen - 1;
3704 uint16_t u16ChkSum = e1kCSum16(pPkt + css, cse - css + 1);
3705 E1kLog2(("%s Inserting csum: %04X at %02X, old value: %04X\n", INSTANCE(pState),
3706 u16ChkSum, cso, *(uint16_t*)(pPkt + cso)));
3707 *(uint16_t*)(pPkt + cso) = u16ChkSum;
3708}
3709
3710/**
3711 * Add a part of descriptor's buffer to transmit frame.
3712 *
3713 * @remarks data.u64BufAddr is used unconditionally for both data
3714 * and legacy descriptors since it is identical to
3715 * legacy.u64BufAddr.
3716 *
3717 * @param pState The device state structure.
3718 * @param pDesc Pointer to the descriptor to transmit.
3719 * @param u16Len Length of buffer to the end of segment.
3720 * @param fSend Force packet sending.
3721 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3722 * @thread E1000_TX
3723 */
3724#ifndef E1K_WITH_TXD_CACHE
3725static void e1kFallbackAddSegment(E1KSTATE* pState, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
3726{
3727 /* TCP header being transmitted */
3728 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
3729 (pState->aTxPacketFallback + pState->contextTSE.tu.u8CSS);
3730 /* IP header being transmitted */
3731 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
3732 (pState->aTxPacketFallback + pState->contextTSE.ip.u8CSS);
3733
3734 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
3735 INSTANCE(pState), u16Len, pState->u32PayRemain, pState->u16HdrRemain, fSend));
3736 Assert(pState->u32PayRemain + pState->u16HdrRemain > 0);
3737
3738 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), PhysAddr,
3739 pState->aTxPacketFallback + pState->u16TxPktLen, u16Len);
3740 E1kLog3(("%s Dump of the segment:\n"
3741 "%.*Rhxd\n"
3742 "%s --- End of dump ---\n",
3743 INSTANCE(pState), u16Len, pState->aTxPacketFallback + pState->u16TxPktLen, INSTANCE(pState)));
3744 pState->u16TxPktLen += u16Len;
3745 E1kLog3(("%s e1kFallbackAddSegment: pState->u16TxPktLen=%x\n",
3746 INSTANCE(pState), pState->u16TxPktLen));
3747 if (pState->u16HdrRemain > 0)
3748 {
3749 /* The header was not complete, check if it is now */
3750 if (u16Len >= pState->u16HdrRemain)
3751 {
3752 /* The rest is payload */
3753 u16Len -= pState->u16HdrRemain;
3754 pState->u16HdrRemain = 0;
3755 /* Save partial checksum and flags */
3756 pState->u32SavedCsum = pTcpHdr->chksum;
3757 pState->u16SavedFlags = pTcpHdr->hdrlen_flags;
3758 /* Clear FIN and PSH flags now and set them only in the last segment */
3759 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
3760 }
3761 else
3762 {
3763 /* Still not */
3764 pState->u16HdrRemain -= u16Len;
3765 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
3766 INSTANCE(pState), pState->u16HdrRemain));
3767 return;
3768 }
3769 }
3770
3771 pState->u32PayRemain -= u16Len;
3772
3773 if (fSend)
3774 {
3775 /* Leave ethernet header intact */
3776 /* IP Total Length = payload + headers - ethernet header */
3777 pIpHdr->total_len = htons(pState->u16TxPktLen - pState->contextTSE.ip.u8CSS);
3778 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
3779 INSTANCE(pState), ntohs(pIpHdr->total_len)));
3780 /* Update IP Checksum */
3781 pIpHdr->chksum = 0;
3782 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
3783 pState->contextTSE.ip.u8CSO,
3784 pState->contextTSE.ip.u8CSS,
3785 pState->contextTSE.ip.u16CSE);
3786
3787 /* Update TCP flags */
3788 /* Restore original FIN and PSH flags for the last segment */
3789 if (pState->u32PayRemain == 0)
3790 {
3791 pTcpHdr->hdrlen_flags = pState->u16SavedFlags;
3792 E1K_INC_CNT32(TSCTC);
3793 }
3794 /* Add TCP length to partial pseudo header sum */
3795 uint32_t csum = pState->u32SavedCsum
3796 + htons(pState->u16TxPktLen - pState->contextTSE.tu.u8CSS);
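        /* Fold any carry out of the low 16 bits back in (one's complement addition). */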
3797 while (csum >> 16)
3798 csum = (csum >> 16) + (csum & 0xFFFF);
3799 pTcpHdr->chksum = csum;
3800 /* Compute final checksum */
3801 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
3802 pState->contextTSE.tu.u8CSO,
3803 pState->contextTSE.tu.u8CSS,
3804 pState->contextTSE.tu.u16CSE);
3805
3806 /*
3807         * Transmit it. If we've used the SG already, allocate a new one before
3808         * we copy the data.
3809 */
3810 if (!pState->CTX_SUFF(pTxSg))
3811 e1kXmitAllocBuf(pState, pState->u16TxPktLen + (pState->fVTag ? 4 : 0), true /*fExactSize*/, false /*fGso*/);
3812 if (pState->CTX_SUFF(pTxSg))
3813 {
3814 Assert(pState->u16TxPktLen <= pState->CTX_SUFF(pTxSg)->cbAvailable);
3815 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
3816 if (pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pState->aTxPacketFallback)
3817 memcpy(pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->aTxPacketFallback, pState->u16TxPktLen);
3818 pState->CTX_SUFF(pTxSg)->cbUsed = pState->u16TxPktLen;
3819 pState->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pState->u16TxPktLen;
3820 }
3821 e1kTransmitFrame(pState, fOnWorkerThread);
3822
3823 /* Update Sequence Number */
3824 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pState->u16TxPktLen
3825 - pState->contextTSE.dw3.u8HDRLEN);
3826 /* Increment IP identification */
3827 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
3828 }
3829}
3830#else /* E1K_WITH_TXD_CACHE */
3831static int e1kFallbackAddSegment(E1KSTATE* pState, RTGCPHYS PhysAddr, uint16_t u16Len, bool fSend, bool fOnWorkerThread)
3832{
3833 int rc = VINF_SUCCESS;
3834 /* TCP header being transmitted */
3835 struct E1kTcpHeader *pTcpHdr = (struct E1kTcpHeader *)
3836 (pState->aTxPacketFallback + pState->contextTSE.tu.u8CSS);
3837 /* IP header being transmitted */
3838 struct E1kIpHeader *pIpHdr = (struct E1kIpHeader *)
3839 (pState->aTxPacketFallback + pState->contextTSE.ip.u8CSS);
3840
3841 E1kLog3(("%s e1kFallbackAddSegment: Length=%x, remaining payload=%x, header=%x, send=%RTbool\n",
3842 INSTANCE(pState), u16Len, pState->u32PayRemain, pState->u16HdrRemain, fSend));
3843 Assert(pState->u32PayRemain + pState->u16HdrRemain > 0);
3844
3845 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), PhysAddr,
3846 pState->aTxPacketFallback + pState->u16TxPktLen, u16Len);
3847 E1kLog3(("%s Dump of the segment:\n"
3848 "%.*Rhxd\n"
3849 "%s --- End of dump ---\n",
3850 INSTANCE(pState), u16Len, pState->aTxPacketFallback + pState->u16TxPktLen, INSTANCE(pState)));
3851 pState->u16TxPktLen += u16Len;
3852 E1kLog3(("%s e1kFallbackAddSegment: pState->u16TxPktLen=%x\n",
3853 INSTANCE(pState), pState->u16TxPktLen));
3854 if (pState->u16HdrRemain > 0)
3855 {
3856 /* The header was not complete, check if it is now */
3857 if (u16Len >= pState->u16HdrRemain)
3858 {
3859 /* The rest is payload */
3860 u16Len -= pState->u16HdrRemain;
3861 pState->u16HdrRemain = 0;
3862 /* Save partial checksum and flags */
3863 pState->u32SavedCsum = pTcpHdr->chksum;
3864 pState->u16SavedFlags = pTcpHdr->hdrlen_flags;
3865 /* Clear FIN and PSH flags now and set them only in the last segment */
3866 pTcpHdr->hdrlen_flags &= ~htons(E1K_TCP_FIN | E1K_TCP_PSH);
3867 }
3868 else
3869 {
3870 /* Still not */
3871 pState->u16HdrRemain -= u16Len;
3872 E1kLog3(("%s e1kFallbackAddSegment: Header is still incomplete, 0x%x bytes remain.\n",
3873 INSTANCE(pState), pState->u16HdrRemain));
3874 return rc;
3875 }
3876 }
3877
3878 pState->u32PayRemain -= u16Len;
3879
3880 if (fSend)
3881 {
3882 /* Leave ethernet header intact */
3883 /* IP Total Length = payload + headers - ethernet header */
3884 pIpHdr->total_len = htons(pState->u16TxPktLen - pState->contextTSE.ip.u8CSS);
3885 E1kLog3(("%s e1kFallbackAddSegment: End of packet, pIpHdr->total_len=%x\n",
3886 INSTANCE(pState), ntohs(pIpHdr->total_len)));
3887 /* Update IP Checksum */
3888 pIpHdr->chksum = 0;
3889 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
3890 pState->contextTSE.ip.u8CSO,
3891 pState->contextTSE.ip.u8CSS,
3892 pState->contextTSE.ip.u16CSE);
3893
3894 /* Update TCP flags */
3895 /* Restore original FIN and PSH flags for the last segment */
3896 if (pState->u32PayRemain == 0)
3897 {
3898 pTcpHdr->hdrlen_flags = pState->u16SavedFlags;
3899 E1K_INC_CNT32(TSCTC);
3900 }
3901 /* Add TCP length to partial pseudo header sum */
3902 uint32_t csum = pState->u32SavedCsum
3903 + htons(pState->u16TxPktLen - pState->contextTSE.tu.u8CSS);
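        /* Fold any carry out of the low 16 bits back in (one's complement addition). */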
3904 while (csum >> 16)
3905 csum = (csum >> 16) + (csum & 0xFFFF);
3906 pTcpHdr->chksum = csum;
3907 /* Compute final checksum */
3908 e1kInsertChecksum(pState, pState->aTxPacketFallback, pState->u16TxPktLen,
3909 pState->contextTSE.tu.u8CSO,
3910 pState->contextTSE.tu.u8CSS,
3911 pState->contextTSE.tu.u16CSE);
3912
3913 /*
3914 * Transmit it.
3915 */
3916 if (pState->CTX_SUFF(pTxSg))
3917 {
3918 Assert(pState->u16TxPktLen <= pState->CTX_SUFF(pTxSg)->cbAvailable);
3919 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
3920 if (pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg != pState->aTxPacketFallback)
3921 memcpy(pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->aTxPacketFallback, pState->u16TxPktLen);
3922 pState->CTX_SUFF(pTxSg)->cbUsed = pState->u16TxPktLen;
3923 pState->CTX_SUFF(pTxSg)->aSegs[0].cbSeg = pState->u16TxPktLen;
3924 }
3925 e1kTransmitFrame(pState, fOnWorkerThread);
3926
3927 /* Update Sequence Number */
3928 pTcpHdr->seqno = htonl(ntohl(pTcpHdr->seqno) + pState->u16TxPktLen
3929 - pState->contextTSE.dw3.u8HDRLEN);
3930 /* Increment IP identification */
3931 pIpHdr->ident = htons(ntohs(pIpHdr->ident) + 1);
3932
3933 /* Allocate new buffer for the next segment. */
3934 if (pState->u32PayRemain)
3935 {
3936 pState->cbTxAlloc = RT_MIN(pState->u32PayRemain,
3937 pState->contextTSE.dw3.u16MSS)
3938 + pState->contextTSE.dw3.u8HDRLEN
3939 + (pState->fVTag ? 4 : 0);
3940 rc = e1kXmitAllocBuf(pState, false /* fGSO */);
3941 }
3942 }
3943
3944 return rc;
3945}
3946#endif /* E1K_WITH_TXD_CACHE */
3947
3948#ifndef E1K_WITH_TXD_CACHE
3949/**
3950 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
3951 * frame.
3952 *
3953 * We construct the frame in the fallback buffer first and then copy it to the SG
3954 * buffer before passing it down to the network driver code.
3955 *
3956 * @returns true if the frame should be transmitted, false if not.
3957 *
3958 * @param pState The device state structure.
3959 * @param pDesc Pointer to the descriptor to transmit.
3960 * @param cbFragment Length of descriptor's buffer.
3961 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
3962 * @thread E1000_TX
3963 */
3964static bool e1kFallbackAddToFrame(E1KSTATE* pState, E1KTXDESC* pDesc, uint32_t cbFragment, bool fOnWorkerThread)
3965{
3966 PPDMSCATTERGATHER pTxSg = pState->CTX_SUFF(pTxSg);
3967 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
3968 Assert(pDesc->data.cmd.fTSE);
3969 Assert(!e1kXmitIsGsoBuf(pTxSg));
3970
3971 uint16_t u16MaxPktLen = pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw3.u16MSS;
3972 Assert(u16MaxPktLen != 0);
3973 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
3974
3975 /*
3976 * Carve out segments.
3977 */
3978 do
3979 {
3980 /* Calculate how many bytes we have left in this TCP segment */
3981 uint32_t cb = u16MaxPktLen - pState->u16TxPktLen;
3982 if (cb > cbFragment)
3983 {
3984 /* This descriptor fits completely into current segment */
3985 cb = cbFragment;
3986 e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
3987 }
3988 else
3989 {
3990 e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
3991 /*
3992 * Rewind the packet tail pointer to the beginning of payload,
3993 * so we continue writing right beyond the header.
3994 */
3995 pState->u16TxPktLen = pState->contextTSE.dw3.u8HDRLEN;
3996 }
3997
3998 pDesc->data.u64BufAddr += cb;
3999 cbFragment -= cb;
4000 } while (cbFragment > 0);
4001
4002 if (pDesc->data.cmd.fEOP)
4003 {
4004 /* End of packet, next segment will contain header. */
4005 if (pState->u32PayRemain != 0)
4006 E1K_INC_CNT32(TSCTFC);
4007 pState->u16TxPktLen = 0;
4008 e1kXmitFreeBuf(pState);
4009 }
4010
4011 return false;
4012}
4013#else /* E1K_WITH_TXD_CACHE */
4014/**
4015 * TCP segmentation offloading fallback: Add descriptor's buffer to transmit
4016 * frame.
4017 *
4018 * We construct the frame in the fallback buffer first and then copy it to the SG
4019 * buffer before passing it down to the network driver code.
4020 *
4021 * @returns error code
4022 *
4023 * @param pState The device state structure.
4024 * @param pDesc Pointer to the descriptor to transmit.
4026 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4027 * @thread E1000_TX
4028 */
4029static int e1kFallbackAddToFrame(E1KSTATE* pState, E1KTXDESC* pDesc, bool fOnWorkerThread)
4030{
4031 int rc = VINF_SUCCESS;
4032 PPDMSCATTERGATHER pTxSg = pState->CTX_SUFF(pTxSg);
4033 Assert(e1kGetDescType(pDesc) == E1K_DTYP_DATA);
4034 Assert(pDesc->data.cmd.fTSE);
4035 Assert(!e1kXmitIsGsoBuf(pTxSg));
4036
4037 uint16_t u16MaxPktLen = pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw3.u16MSS;
4038 Assert(u16MaxPktLen != 0);
4039 Assert(u16MaxPktLen < E1K_MAX_TX_PKT_SIZE);
4040
4041 /*
4042 * Carve out segments.
4043 */
4044 do
4045 {
4046 /* Calculate how many bytes we have left in this TCP segment */
4047 uint32_t cb = u16MaxPktLen - pState->u16TxPktLen;
4048 if (cb > pDesc->data.cmd.u20DTALEN)
4049 {
4050 /* This descriptor fits completely into current segment */
4051 cb = pDesc->data.cmd.u20DTALEN;
4052 rc = e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, pDesc->data.cmd.fEOP /*fSend*/, fOnWorkerThread);
4053 }
4054 else
4055 {
4056 rc = e1kFallbackAddSegment(pState, pDesc->data.u64BufAddr, cb, true /*fSend*/, fOnWorkerThread);
4057 /*
4058 * Rewind the packet tail pointer to the beginning of payload,
4059 * so we continue writing right beyond the header.
4060 */
4061 pState->u16TxPktLen = pState->contextTSE.dw3.u8HDRLEN;
4062 }
4063
4064 pDesc->data.u64BufAddr += cb;
4065 pDesc->data.cmd.u20DTALEN -= cb;
4066 } while (pDesc->data.cmd.u20DTALEN > 0 && RT_SUCCESS(rc));
4067
4068 if (pDesc->data.cmd.fEOP)
4069 {
4070 /* End of packet, next segment will contain header. */
4071 if (pState->u32PayRemain != 0)
4072 E1K_INC_CNT32(TSCTFC);
4073 pState->u16TxPktLen = 0;
4074 e1kXmitFreeBuf(pState);
4075 }
4076
4077    return rc;
4078}
4079#endif /* E1K_WITH_TXD_CACHE */
4080
4081
4082/**
4083 * Add descriptor's buffer to transmit frame.
4084 *
4085 * This deals with GSO and normal frames, e1kFallbackAddToFrame deals with the
4086 * TSE frames we cannot handle as GSO.
4087 *
4088 * @returns true on success, false on failure.
4089 *
4090 * @param pThis The device state structure.
4091 * @param PhysAddr The physical address of the descriptor buffer.
4092 * @param cbFragment Length of descriptor's buffer.
4093 * @thread E1000_TX
4094 */
4095static bool e1kAddToFrame(E1KSTATE *pThis, RTGCPHYS PhysAddr, uint32_t cbFragment)
4096{
4097 PPDMSCATTERGATHER pTxSg = pThis->CTX_SUFF(pTxSg);
4098 bool const fGso = e1kXmitIsGsoBuf(pTxSg);
4099 uint32_t const cbNewPkt = cbFragment + pThis->u16TxPktLen;
4100
4101 if (RT_UNLIKELY( !fGso && cbNewPkt > E1K_MAX_TX_PKT_SIZE ))
4102 {
4103 E1kLog(("%s Transmit packet is too large: %u > %u(max)\n", INSTANCE(pThis), cbNewPkt, E1K_MAX_TX_PKT_SIZE));
4104 return false;
4105 }
4106 if (RT_UNLIKELY( fGso && cbNewPkt > pTxSg->cbAvailable ))
4107 {
4108 E1kLog(("%s Transmit packet is too large: %u > %u(max)/GSO\n", INSTANCE(pThis), cbNewPkt, pTxSg->cbAvailable));
4109 return false;
4110 }
4111
4112 if (RT_LIKELY(pTxSg))
4113 {
4114 Assert(pTxSg->cSegs == 1);
4115 Assert(pTxSg->cbUsed == pThis->u16TxPktLen);
4116
4117 PDMDevHlpPhysRead(pThis->CTX_SUFF(pDevIns), PhysAddr,
4118 (uint8_t *)pTxSg->aSegs[0].pvSeg + pThis->u16TxPktLen, cbFragment);
4119
4120 pTxSg->cbUsed = cbNewPkt;
4121 }
4122 pThis->u16TxPktLen = cbNewPkt;
4123
4124 return true;
4125}
4126
4127
4128/**
4129 * Write the descriptor back to guest memory and notify the guest.
4130 *
4131 * @param pState The device state structure.
4132 * @param pDesc Pointer to the descriptor that has been transmitted.
4133 * @param addr Physical address of the descriptor in guest memory.
4134 * @thread E1000_TX
4135 */
4136static void e1kDescReport(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr)
4137{
4138 /*
4139 * We fake descriptor write-back bursting. Descriptors are written back as they are
4140 * processed.
4141 */
4142 /* Let's pretend we process descriptors. Write back with DD set. */
4143 /*
4144     * Prior to r71586 we tried to accommodate the case when write-back bursts
4145     * are enabled without actually implementing bursting by writing back all
4146     * descriptors, even the ones that do not have RS set. This caused kernel
4147     * panics with Linux SMP kernels, as the e1000 driver tried to free the skb
4148     * associated with the written-back descriptor if it happened to be a context
4149     * descriptor, since context descriptors do not have an skb associated with them.
4150     * Starting from r71586 we write back only the descriptors with RS set,
4151     * which is a little bit different from what the real hardware does in
4152     * case there is a chain of data descriptors where some of them have RS set
4153     * and others do not. It is a very uncommon scenario imho.
4154 * We need to check RPS as well since some legacy drivers use it instead of
4155 * RS even with newer cards.
4156 */
4157 if (pDesc->legacy.cmd.fRS || pDesc->legacy.cmd.fRPS)
4158 {
4159 pDesc->legacy.dw3.fDD = 1; /* Descriptor Done */
4160 e1kWriteBackDesc(pState, pDesc, addr);
4161 if (pDesc->legacy.cmd.fEOP)
4162 {
4163#ifdef E1K_USE_TX_TIMERS
4164 if (pDesc->legacy.cmd.fIDE)
4165 {
4166 E1K_INC_ISTAT_CNT(pState->uStatTxIDE);
4167 //if (pState->fIntRaised)
4168 //{
4169 // /* Interrupt is already pending, no need for timers */
4170 // ICR |= ICR_TXDW;
4171 //}
4172 //else {
4173            /* Arm the timer to fire in TIDV usec (discard .024) */
4174 e1kArmTimer(pState, pState->CTX_SUFF(pTIDTimer), TIDV);
4175# ifndef E1K_NO_TAD
4176 /* If absolute timer delay is enabled and the timer is not running yet, arm it. */
4177 E1kLog2(("%s Checking if TAD timer is running\n",
4178 INSTANCE(pState)));
4179 if (TADV != 0 && !TMTimerIsActive(pState->CTX_SUFF(pTADTimer)))
4180 e1kArmTimer(pState, pState->CTX_SUFF(pTADTimer), TADV);
4181# endif /* E1K_NO_TAD */
4182 }
4183 else
4184 {
4185 E1kLog2(("%s No IDE set, cancel TAD timer and raise interrupt\n",
4186 INSTANCE(pState)));
4187# ifndef E1K_NO_TAD
4188 /* Cancel both timers if armed and fire immediately. */
4189 e1kCancelTimer(pState, pState->CTX_SUFF(pTADTimer));
4190# endif /* E1K_NO_TAD */
4191#endif /* E1K_USE_TX_TIMERS */
4192 E1K_INC_ISTAT_CNT(pState->uStatIntTx);
4193 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXDW);
4194#ifdef E1K_USE_TX_TIMERS
4195 }
4196#endif /* E1K_USE_TX_TIMERS */
4197 }
4198 }
4199 else
4200 {
4201 E1K_INC_ISTAT_CNT(pState->uStatTxNoRS);
4202 }
4203}
4204
4205#ifndef E1K_WITH_TXD_CACHE
4206/**
4207 * Process Transmit Descriptor.
4208 *
4209 * E1000 supports three types of transmit descriptors:
4210 * - legacy data descriptors of older format (context-less).
4211 * - data the same as legacy but providing new offloading capabilities.
4212 * - context sets up the context for following data descriptors.
4213 *
4214 * @param pState The device state structure.
4215 * @param pDesc Pointer to descriptor union.
4216 * @param addr Physical address of descriptor in guest memory.
4217 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4218 * @thread E1000_TX
4219 */
4220static int e1kXmitDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr, bool fOnWorkerThread)
4221{
4222 int rc = VINF_SUCCESS;
4223 uint32_t cbVTag = 0;
4224
4225 e1kPrintTDesc(pState, pDesc, "vvv");
4226
4227#ifdef E1K_USE_TX_TIMERS
4228 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
4229#endif /* E1K_USE_TX_TIMERS */
4230
4231 switch (e1kGetDescType(pDesc))
4232 {
4233 case E1K_DTYP_CONTEXT:
4234 if (pDesc->context.dw2.fTSE)
4235 {
4236 pState->contextTSE = pDesc->context;
4237 pState->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4238 pState->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4239 e1kSetupGsoCtx(&pState->GsoCtx, &pDesc->context);
4240 STAM_COUNTER_INC(&pState->StatTxDescCtxTSE);
4241 }
4242 else
4243 {
4244 pState->contextNormal = pDesc->context;
4245 STAM_COUNTER_INC(&pState->StatTxDescCtxNormal);
4246 }
4247 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4248 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", INSTANCE(pState),
4249 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4250 pDesc->context.ip.u8CSS,
4251 pDesc->context.ip.u8CSO,
4252 pDesc->context.ip.u16CSE,
4253 pDesc->context.tu.u8CSS,
4254 pDesc->context.tu.u8CSO,
4255 pDesc->context.tu.u16CSE));
4256 E1K_INC_ISTAT_CNT(pState->uStatDescCtx);
4257 e1kDescReport(pState, pDesc, addr);
4258 break;
4259
4260 case E1K_DTYP_DATA:
4261 {
4262 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4263 {
4264            E1kLog2(("%s Empty data descriptor, skipped.\n", INSTANCE(pState)));
4265 /** @todo Same as legacy when !TSE. See below. */
4266 break;
4267 }
4268 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4269 &pState->StatTxDescTSEData:
4270 &pState->StatTxDescData);
4271 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4272 E1K_INC_ISTAT_CNT(pState->uStatDescDat);
4273
4274 /*
4275         * The last descriptor of a non-TSE packet must contain the VLE flag.
4276         * TSE packets have the VLE flag in the first descriptor. The latter
4277         * case is taken care of a bit later when cbVTag gets assigned.
4278 *
4279 * 1) pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE
4280 */
4281 if (pDesc->data.cmd.fEOP && !pDesc->data.cmd.fTSE)
4282 {
4283 pState->fVTag = pDesc->data.cmd.fVLE;
4284 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
4285 }
4286 /*
4287 * First fragment: Allocate new buffer and save the IXSM and TXSM
4288 * packet options as these are only valid in the first fragment.
4289 */
4290 if (pState->u16TxPktLen == 0)
4291 {
4292 pState->fIPcsum = pDesc->data.dw3.fIXSM;
4293 pState->fTCPcsum = pDesc->data.dw3.fTXSM;
4294 E1kLog2(("%s Saving checksum flags:%s%s; \n", INSTANCE(pState),
4295 pState->fIPcsum ? " IP" : "",
4296 pState->fTCPcsum ? " TCP/UDP" : ""));
4297 if (pDesc->data.cmd.fTSE)
4298 {
4299 /* 2) pDesc->data.cmd.fTSE && pState->u16TxPktLen == 0 */
4300 pState->fVTag = pDesc->data.cmd.fVLE;
4301 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
4302 cbVTag = pState->fVTag ? 4 : 0;
4303 }
4304 else if (pDesc->data.cmd.fEOP)
4305 cbVTag = pDesc->data.cmd.fVLE ? 4 : 0;
4306 else
4307 cbVTag = 4;
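                /* Note: when this is not the last fragment we do not yet know whether the
                 * final descriptor will have VLE set, so 4 bytes were reserved above just in case. */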
4308 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", INSTANCE(pState), cbVTag));
4309 if (e1kCanDoGso(&pState->GsoCtx, &pDesc->data, &pState->contextTSE))
4310 rc = e1kXmitAllocBuf(pState, pState->contextTSE.dw2.u20PAYLEN + pState->contextTSE.dw3.u8HDRLEN + cbVTag,
4311 true /*fExactSize*/, true /*fGso*/);
4312 else if (pDesc->data.cmd.fTSE)
4313 rc = e1kXmitAllocBuf(pState, pState->contextTSE.dw3.u16MSS + pState->contextTSE.dw3.u8HDRLEN + cbVTag,
4314 pDesc->data.cmd.fTSE /*fExactSize*/, false /*fGso*/);
4315 else
4316 rc = e1kXmitAllocBuf(pState, pDesc->data.cmd.u20DTALEN + cbVTag,
4317 pDesc->data.cmd.fEOP /*fExactSize*/, false /*fGso*/);
4318
4319 /**
4320 * @todo: Perhaps it is not that simple for GSO packets! We may
4321 * need to unwind some changes.
4322 */
4323 if (RT_FAILURE(rc))
4324 {
4325 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4326 break;
4327 }
4328                /** @todo Is there any way of indicating errors other than collisions? Like
4329                 *        VERR_NET_DOWN. */
4330 }
4331
4332 /*
4333 * Add the descriptor data to the frame. If the frame is complete,
4334 * transmit it and reset the u16TxPktLen field.
4335 */
4336 if (e1kXmitIsGsoBuf(pState->CTX_SUFF(pTxSg)))
4337 {
4338 STAM_COUNTER_INC(&pState->StatTxPathGSO);
4339 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4340 if (pDesc->data.cmd.fEOP)
4341 {
4342 if ( fRc
4343 && pState->CTX_SUFF(pTxSg)
4344 && pState->CTX_SUFF(pTxSg)->cbUsed == (size_t)pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN)
4345 {
4346 e1kTransmitFrame(pState, fOnWorkerThread);
4347 E1K_INC_CNT32(TSCTC);
4348 }
4349 else
4350 {
4351 if (fRc)
4352 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , INSTANCE(pState),
4353 pState->CTX_SUFF(pTxSg), pState->CTX_SUFF(pTxSg) ? pState->CTX_SUFF(pTxSg)->cbUsed : 0,
4354 pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN));
4355 e1kXmitFreeBuf(pState);
4356 E1K_INC_CNT32(TSCTFC);
4357 }
4358 pState->u16TxPktLen = 0;
4359 }
4360 }
4361 else if (!pDesc->data.cmd.fTSE)
4362 {
4363 STAM_COUNTER_INC(&pState->StatTxPathRegular);
4364 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4365 if (pDesc->data.cmd.fEOP)
4366 {
4367 if (fRc && pState->CTX_SUFF(pTxSg))
4368 {
4369 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
4370 if (pState->fIPcsum)
4371 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4372 pState->contextNormal.ip.u8CSO,
4373 pState->contextNormal.ip.u8CSS,
4374 pState->contextNormal.ip.u16CSE);
4375 if (pState->fTCPcsum)
4376 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4377 pState->contextNormal.tu.u8CSO,
4378 pState->contextNormal.tu.u8CSS,
4379 pState->contextNormal.tu.u16CSE);
4380 e1kTransmitFrame(pState, fOnWorkerThread);
4381 }
4382 else
4383 e1kXmitFreeBuf(pState);
4384 pState->u16TxPktLen = 0;
4385 }
4386 }
4387 else
4388 {
4389 STAM_COUNTER_INC(&pState->StatTxPathFallback);
4390 e1kFallbackAddToFrame(pState, pDesc, pDesc->data.cmd.u20DTALEN, fOnWorkerThread);
4391 }
4392
4393 e1kDescReport(pState, pDesc, addr);
4394 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4395 break;
4396 }
4397
4398 case E1K_DTYP_LEGACY:
4399 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4400 {
4401 E1kLog(("%s Empty legacy descriptor, skipped.\n", INSTANCE(pState)));
4402 /** @todo 3.3.3, Length/Buffer Address: RS set -> write DD when processing. */
4403 break;
4404 }
4405 STAM_COUNTER_INC(&pState->StatTxDescLegacy);
4406 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4407
4408 /* First fragment: allocate new buffer. */
4409 if (pState->u16TxPktLen == 0)
4410 {
4411 if (pDesc->legacy.cmd.fEOP)
4412 cbVTag = pDesc->legacy.cmd.fVLE ? 4 : 0;
4413 else
4414 cbVTag = 4;
4415 E1kLog3(("%s About to allocate TX buffer: cbVTag=%u\n", INSTANCE(pState), cbVTag));
4416 /** @todo reset status bits? */
4417 rc = e1kXmitAllocBuf(pState, pDesc->legacy.cmd.u16Length + cbVTag, pDesc->legacy.cmd.fEOP, false /*fGso*/);
4418 if (RT_FAILURE(rc))
4419 {
4420 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4421 break;
4422 }
4423
4424                /** @todo Is there any way of indicating errors other than collisions? Like
4425                 *        VERR_NET_DOWN. */
4426 }
4427
4428 /* Add fragment to frame. */
4429 if (e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4430 {
4431 E1K_INC_ISTAT_CNT(pState->uStatDescLeg);
4432
4433 /* Last fragment: Transmit and reset the packet storage counter. */
4434 if (pDesc->legacy.cmd.fEOP)
4435 {
4436 pState->fVTag = pDesc->legacy.cmd.fVLE;
4437 pState->u16VTagTCI = pDesc->legacy.dw3.u16Special;
4438 /** @todo Offload processing goes here. */
4439 e1kTransmitFrame(pState, fOnWorkerThread);
4440 pState->u16TxPktLen = 0;
4441 }
4442 }
4443 /* Last fragment + failure: free the buffer and reset the storage counter. */
4444 else if (pDesc->legacy.cmd.fEOP)
4445 {
4446 e1kXmitFreeBuf(pState);
4447 pState->u16TxPktLen = 0;
4448 }
4449
4450 e1kDescReport(pState, pDesc, addr);
4451 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4452 break;
4453
4454 default:
4455 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4456 INSTANCE(pState), e1kGetDescType(pDesc)));
4457 break;
4458 }
4459
4460 return rc;
4461}
4462#else /* E1K_WITH_TXD_CACHE */
4463/**
4464 * Process Transmit Descriptor.
4465 *
4466 * E1000 supports three types of transmit descriptors:
4467 * - legacy data descriptors of older format (context-less).
4468 * - data the same as legacy but providing new offloading capabilities.
4469 * - context sets up the context for following data descriptors.
4470 *
4471 * @param pState The device state structure.
4472 * @param pDesc Pointer to descriptor union.
4473 * @param addr Physical address of descriptor in guest memory.
4474 * @param fOnWorkerThread Whether we're on a worker thread or an EMT.
4476 * @thread E1000_TX
4477 */
4478static int e1kXmitDesc(E1KSTATE* pState, E1KTXDESC* pDesc, RTGCPHYS addr,
4479 bool fOnWorkerThread)
4480{
4481 int rc = VINF_SUCCESS;
4482 uint32_t cbVTag = 0;
4483
4484 e1kPrintTDesc(pState, pDesc, "vvv");
4485
4486#ifdef E1K_USE_TX_TIMERS
4487 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
4488#endif /* E1K_USE_TX_TIMERS */
4489
4490 switch (e1kGetDescType(pDesc))
4491 {
4492 case E1K_DTYP_CONTEXT:
4493            /* The caller has already updated the context */
4494 E1K_INC_ISTAT_CNT(pState->uStatDescCtx);
4495 e1kDescReport(pState, pDesc, addr);
4496 break;
4497
4498 case E1K_DTYP_DATA:
4499 {
4500 STAM_COUNTER_INC(pDesc->data.cmd.fTSE?
4501 &pState->StatTxDescTSEData:
4502 &pState->StatTxDescData);
4503 E1K_INC_ISTAT_CNT(pState->uStatDescDat);
4504 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4505 if (pDesc->data.cmd.u20DTALEN == 0 || pDesc->data.u64BufAddr == 0)
4506 {
4507                E1kLog2(("%s Empty data descriptor, skipped.\n", INSTANCE(pState)));
4508 }
4509 else
4510 {
4511 /*
4512 * Add the descriptor data to the frame. If the frame is complete,
4513 * transmit it and reset the u16TxPktLen field.
4514 */
4515 if (e1kXmitIsGsoBuf(pState->CTX_SUFF(pTxSg)))
4516 {
4517 STAM_COUNTER_INC(&pState->StatTxPathGSO);
4518 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4519 if (pDesc->data.cmd.fEOP)
4520 {
4521 if ( fRc
4522 && pState->CTX_SUFF(pTxSg)
4523 && pState->CTX_SUFF(pTxSg)->cbUsed == (size_t)pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN)
4524 {
4525 e1kTransmitFrame(pState, fOnWorkerThread);
4526 E1K_INC_CNT32(TSCTC);
4527 }
4528 else
4529 {
4530 if (fRc)
4531 E1kLog(("%s bad GSO/TSE %p or %u < %u\n" , INSTANCE(pState),
4532 pState->CTX_SUFF(pTxSg), pState->CTX_SUFF(pTxSg) ? pState->CTX_SUFF(pTxSg)->cbUsed : 0,
4533 pState->contextTSE.dw3.u8HDRLEN + pState->contextTSE.dw2.u20PAYLEN));
4534 e1kXmitFreeBuf(pState);
4535 E1K_INC_CNT32(TSCTFC);
4536 }
4537 pState->u16TxPktLen = 0;
4538 }
4539 }
4540 else if (!pDesc->data.cmd.fTSE)
4541 {
4542 STAM_COUNTER_INC(&pState->StatTxPathRegular);
4543 bool fRc = e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->data.cmd.u20DTALEN);
4544 if (pDesc->data.cmd.fEOP)
4545 {
4546 if (fRc && pState->CTX_SUFF(pTxSg))
4547 {
4548 Assert(pState->CTX_SUFF(pTxSg)->cSegs == 1);
4549 if (pState->fIPcsum)
4550 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4551 pState->contextNormal.ip.u8CSO,
4552 pState->contextNormal.ip.u8CSS,
4553 pState->contextNormal.ip.u16CSE);
4554 if (pState->fTCPcsum)
4555 e1kInsertChecksum(pState, (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg, pState->u16TxPktLen,
4556 pState->contextNormal.tu.u8CSO,
4557 pState->contextNormal.tu.u8CSS,
4558 pState->contextNormal.tu.u16CSE);
4559 e1kTransmitFrame(pState, fOnWorkerThread);
4560 }
4561 else
4562 e1kXmitFreeBuf(pState);
4563 pState->u16TxPktLen = 0;
4564 }
4565 }
4566 else
4567 {
4568 STAM_COUNTER_INC(&pState->StatTxPathFallback);
4569 rc = e1kFallbackAddToFrame(pState, pDesc, fOnWorkerThread);
4570 }
4571 }
4572 e1kDescReport(pState, pDesc, addr);
4573 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4574 break;
4575 }
4576
4577 case E1K_DTYP_LEGACY:
4578 STAM_COUNTER_INC(&pState->StatTxDescLegacy);
4579 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4580 if (pDesc->legacy.cmd.u16Length == 0 || pDesc->legacy.u64BufAddr == 0)
4581 {
4582 E1kLog(("%s Empty legacy descriptor, skipped.\n", INSTANCE(pState)));
4583 }
4584 else
4585 {
4586 /* Add fragment to frame. */
4587 if (e1kAddToFrame(pState, pDesc->data.u64BufAddr, pDesc->legacy.cmd.u16Length))
4588 {
4589 E1K_INC_ISTAT_CNT(pState->uStatDescLeg);
4590
4591 /* Last fragment: Transmit and reset the packet storage counter. */
4592 if (pDesc->legacy.cmd.fEOP)
4593 {
4594 if (pDesc->legacy.cmd.fIC)
4595 {
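                        /*
                         * Illustrative values only: for an IPv4 header checksum a guest
                         * would typically program CSS = 14 (start of the IP header) and
                         * CSO = 24 (offset of the IP checksum field within the frame);
                         * the actual values are entirely up to the guest driver.
                         */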
4596 e1kInsertChecksum(pState,
4597 (uint8_t *)pState->CTX_SUFF(pTxSg)->aSegs[0].pvSeg,
4598 pState->u16TxPktLen,
4599 pDesc->legacy.cmd.u8CSO,
4600 pDesc->legacy.dw3.u8CSS,
4601 0);
4602 }
4603 e1kTransmitFrame(pState, fOnWorkerThread);
4604 pState->u16TxPktLen = 0;
4605 }
4606 }
4607 /* Last fragment + failure: free the buffer and reset the storage counter. */
4608 else if (pDesc->legacy.cmd.fEOP)
4609 {
4610 e1kXmitFreeBuf(pState);
4611 pState->u16TxPktLen = 0;
4612 }
4613 }
4614 e1kDescReport(pState, pDesc, addr);
4615 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4616 break;
4617
4618 default:
4619 E1kLog(("%s ERROR Unsupported transmit descriptor type: 0x%04x\n",
4620 INSTANCE(pState), e1kGetDescType(pDesc)));
4621 break;
4622 }
4623
4624 return rc;
4625}
4626
4627
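/**
 * Update the transmit context with the data from a context descriptor.
 *
 * For TSE contexts this also resets the remaining payload/header counters and
 * re-initializes the GSO context; for normal contexts only the checksum
 * offloading parameters are stored.
 *
 * @param   pState      The device state structure.
 * @param   pDesc       Pointer to the context descriptor.
 */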
4628DECLINLINE(void) e1kUpdateTxContext(E1KSTATE* pState, E1KTXDESC* pDesc)
4629{
4630 if (pDesc->context.dw2.fTSE)
4631 {
4632 pState->contextTSE = pDesc->context;
4633 pState->u32PayRemain = pDesc->context.dw2.u20PAYLEN;
4634 pState->u16HdrRemain = pDesc->context.dw3.u8HDRLEN;
4635 e1kSetupGsoCtx(&pState->GsoCtx, &pDesc->context);
4636 STAM_COUNTER_INC(&pState->StatTxDescCtxTSE);
4637 }
4638 else
4639 {
4640 pState->contextNormal = pDesc->context;
4641 STAM_COUNTER_INC(&pState->StatTxDescCtxNormal);
4642 }
4643 E1kLog2(("%s %s context updated: IP CSS=%02X, IP CSO=%02X, IP CSE=%04X"
4644 ", TU CSS=%02X, TU CSO=%02X, TU CSE=%04X\n", INSTANCE(pState),
4645 pDesc->context.dw2.fTSE ? "TSE" : "Normal",
4646 pDesc->context.ip.u8CSS,
4647 pDesc->context.ip.u8CSO,
4648 pDesc->context.ip.u16CSE,
4649 pDesc->context.tu.u8CSS,
4650 pDesc->context.tu.u8CSO,
4651 pDesc->context.tu.u16CSE));
4652}
4653
4654
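/**
 * Scan the TX descriptor cache for a complete packet.
 *
 * Walks the fetched descriptors starting at iTxDCurrent, applying context
 * descriptors on the way, and computes the size of the buffer to allocate
 * (cbTxAlloc) as well as the VLAN and GSO properties of the packet.
 *
 * @returns true if a complete packet (terminated by EOP) or a run of empty
 *          descriptors was found, false if more descriptors are needed.
 * @param   pState      The device state structure.
 */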
4655static bool e1kLocateTxPacket(E1KSTATE *pState)
4656{
4657 LogFlow(("%s e1kLocateTxPacket: ENTER cbTxAlloc=%d\n",
4658 INSTANCE(pState), pState->cbTxAlloc));
4659 /* Check if we have located the packet already. */
4660 if (pState->cbTxAlloc)
4661 {
4662 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4663 INSTANCE(pState), pState->cbTxAlloc));
4664 return true;
4665 }
4666
4667 bool fTSE = false;
4668 uint32_t cbPacket = 0;
4669
4670 for (int i = pState->iTxDCurrent; i < pState->nTxDFetched; ++i)
4671 {
4672 E1KTXDESC *pDesc = &pState->aTxDescriptors[i];
4673 switch (e1kGetDescType(pDesc))
4674 {
4675 case E1K_DTYP_CONTEXT:
4676 e1kUpdateTxContext(pState, pDesc);
4677 continue;
4678 case E1K_DTYP_LEGACY:
4679 /* Skip empty descriptors. */
4680 if (!pDesc->legacy.u64BufAddr || !pDesc->legacy.cmd.u16Length)
4681 break;
4682 cbPacket += pDesc->legacy.cmd.u16Length;
4683 pState->fGSO = false;
4684 break;
4685 case E1K_DTYP_DATA:
4686 /* Skip empty descriptors. */
4687 if (!pDesc->data.u64BufAddr || !pDesc->data.cmd.u20DTALEN)
4688 break;
4689 if (cbPacket == 0)
4690 {
4691 /*
4692 * The first fragment: save IXSM and TXSM options
4693 * as these are only valid in the first fragment.
4694 */
4695 pState->fIPcsum = pDesc->data.dw3.fIXSM;
4696 pState->fTCPcsum = pDesc->data.dw3.fTXSM;
4697 fTSE = pDesc->data.cmd.fTSE;
4698 /*
4699 * TSE descriptors have VLE bit properly set in
4700 * the first fragment.
4701 */
4702 if (fTSE)
4703 {
4704 pState->fVTag = pDesc->data.cmd.fVLE;
4705 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
4706 }
4707 pState->fGSO = e1kCanDoGso(&pState->GsoCtx, &pDesc->data, &pState->contextTSE);
4708 }
4709 cbPacket += pDesc->data.cmd.u20DTALEN;
4710 break;
4711 default:
4712 AssertMsgFailed(("Impossible descriptor type!"));
4713 }
4714 if (pDesc->legacy.cmd.fEOP)
4715 {
4716 /*
4717 * Non-TSE descriptors have VLE bit properly set in
4718 * the last fragment.
4719 */
4720 if (!fTSE)
4721 {
4722 pState->fVTag = pDesc->data.cmd.fVLE;
4723 pState->u16VTagTCI = pDesc->data.dw3.u16Special;
4724 }
4725 /*
4726 * Compute the required buffer size. If we cannot do GSO but still
4727 * have to do segmentation we allocate the first segment only.
4728 */
4729 pState->cbTxAlloc = (!fTSE || pState->fGSO) ?
4730 cbPacket :
4731 RT_MIN(cbPacket, pState->contextTSE.dw3.u16MSS + pState->contextTSE.dw3.u8HDRLEN);
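            /*
             * Illustrative example: with MSS = 1460, HDRLEN = 54 and a packet of
             * cbPacket = 4054 bytes, the non-GSO TSE path allocates only
             * min(4054, 1460 + 54) = 1514 bytes for the first segment.
             */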
4732 if (pState->fVTag)
4733 pState->cbTxAlloc += 4;
4734 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d\n",
4735 INSTANCE(pState), pState->cbTxAlloc));
4736 return true;
4737 }
4738 }
4739
4740 if (cbPacket == 0 && pState->nTxDFetched - pState->iTxDCurrent > 0)
4741 {
4742         /* All descriptors were empty; we need to process them as a dummy packet. */
4743 LogFlow(("%s e1kLocateTxPacket: RET true cbTxAlloc=%d, zero packet!\n",
4744 INSTANCE(pState), pState->cbTxAlloc));
4745 return true;
4746 }
4747 LogFlow(("%s e1kLocateTxPacket: RET false cbTxAlloc=%d\n",
4748 INSTANCE(pState), pState->cbTxAlloc));
4749 return false;
4750}
4751
4752
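/**
 * Process the descriptors of the current packet from the TX descriptor cache.
 *
 * Passes each descriptor to e1kXmitDesc(), advancing TDH (with wrap-around)
 * and raising ICR.TXD_LOW when the ring runs low, until the end of the packet
 * or a failure is reached.
 *
 * @returns VBox status code.
 * @param   pState              The device state structure.
 * @param   fOnWorkerThread     Whether we're on a worker thread or an EMT.
 */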
4753static int e1kXmitPacket(E1KSTATE *pState, bool fOnWorkerThread)
4754{
4755 int rc = VINF_SUCCESS;
4756
4757 LogFlow(("%s e1kXmitPacket: ENTER current=%d fetched=%d\n",
4758 INSTANCE(pState), pState->iTxDCurrent, pState->nTxDFetched));
4759
4760 while (pState->iTxDCurrent < pState->nTxDFetched)
4761 {
4762 E1KTXDESC *pDesc = &pState->aTxDescriptors[pState->iTxDCurrent];
4763 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
4764 INSTANCE(pState), TDBAH, TDBAL + TDH * sizeof(E1KTXDESC), TDLEN, TDH, TDT));
4765 rc = e1kXmitDesc(pState, pDesc,
4766 ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(E1KTXDESC),
4767 fOnWorkerThread);
4768 if (RT_FAILURE(rc))
4769 break;
4770 if (++TDH * sizeof(E1KTXDESC) >= TDLEN)
4771 TDH = 0;
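        /*
         * For example, with a ring of 256 descriptors (TDLEN = 256 * sizeof(E1KTXDESC))
         * the increment above makes 256 * sizeof(E1KTXDESC) reach TDLEN, so TDH
         * wraps from 255 back to 0.
         */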
4772 uint32_t uLowThreshold = GET_BITS(TXDCTL, LWTHRESH)*8;
4773 if (uLowThreshold != 0 && e1kGetTxLen(pState) <= uLowThreshold)
4774 {
4775 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
4776 INSTANCE(pState), e1kGetTxLen(pState), GET_BITS(TXDCTL, LWTHRESH)*8));
4777 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXD_LOW);
4778 }
4779 ++pState->iTxDCurrent;
4780 if (e1kGetDescType(pDesc) != E1K_DTYP_CONTEXT && pDesc->legacy.cmd.fEOP)
4781 break;
4782 }
4783
4784 LogFlow(("%s e1kXmitPacket: RET %Rrc current=%d fetched=%d\n",
4785 INSTANCE(pState), rc, pState->iTxDCurrent, pState->nTxDFetched));
4786 return rc;
4787}
4788#endif /* E1K_WITH_TXD_CACHE */
4789
4790#ifndef E1K_WITH_TXD_CACHE
4791/**
4792 * Transmit pending descriptors.
4793 *
4794 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
4795 *
4796 * @param pState The E1000 state.
4797 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
4798 */
4799static int e1kXmitPending(E1KSTATE *pState, bool fOnWorkerThread)
4800{
4801 int rc = VINF_SUCCESS;
4802
4803 /* Check if transmitter is enabled. */
4804 if (!(TCTL & TCTL_EN))
4805 return VINF_SUCCESS;
4806 /*
4807 * Grab the xmit lock of the driver as well as the E1K device state.
4808 */
4809 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
4810 if (pDrv)
4811 {
4812 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
4813 if (RT_FAILURE(rc))
4814 return rc;
4815 }
4816 rc = e1kCsTxEnter(pState, VERR_SEM_BUSY);
4817 if (RT_LIKELY(rc == VINF_SUCCESS))
4818 {
4819 /*
4820 * Process all pending descriptors.
4821 * Note! Do not process descriptors in locked state
4822 */
4823 while (TDH != TDT && !pState->fLocked)
4824 {
4825 E1KTXDESC desc;
4826 E1kLog3(("%s About to process new TX descriptor at %08x%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
4827 INSTANCE(pState), TDBAH, TDBAL + TDH * sizeof(desc), TDLEN, TDH, TDT));
4828
4829 e1kLoadDesc(pState, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc));
4830 rc = e1kXmitDesc(pState, &desc, ((uint64_t)TDBAH << 32) + TDBAL + TDH * sizeof(desc), fOnWorkerThread);
4831 /* If we failed to transmit descriptor we will try it again later */
4832 if (RT_FAILURE(rc))
4833 break;
4834 if (++TDH * sizeof(desc) >= TDLEN)
4835 TDH = 0;
4836
4837 if (e1kGetTxLen(pState) <= GET_BITS(TXDCTL, LWTHRESH)*8)
4838 {
4839 E1kLog2(("%s Low on transmit descriptors, raise ICR.TXD_LOW, len=%x thresh=%x\n",
4840 INSTANCE(pState), e1kGetTxLen(pState), GET_BITS(TXDCTL, LWTHRESH)*8));
4841 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXD_LOW);
4842 }
4843
4844 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4845 }
4846
4847 /// @todo: uncomment: pState->uStatIntTXQE++;
4848 /// @todo: uncomment: e1kRaiseInterrupt(pState, ICR_TXQE);
4849 e1kCsTxLeave(pState);
4850 }
4851
4852 /*
4853 * Release the lock.
4854 */
4855 if (pDrv)
4856 pDrv->pfnEndXmit(pDrv);
4857 return rc;
4858}
4859#else /* E1K_WITH_TXD_CACHE */
4860static void e1kDumpTxDCache(E1KSTATE *pState)
4861{
4862 for (int i = 0; i < pState->nTxDFetched; ++i)
4863 e1kPrintTDesc(pState, &pState->aTxDescriptors[i], "***", RTLOGGRPFLAGS_LEVEL_4);
4864}
4865
4866/**
4867 * Transmit pending descriptors.
4868 *
4869 * @returns VBox status code. VERR_TRY_AGAIN is returned if we're busy.
4870 *
4871 * @param pState The E1000 state.
4872 * @param fOnWorkerThread Whether we're on a worker thread or on an EMT.
4873 */
4874static int e1kXmitPending(E1KSTATE *pState, bool fOnWorkerThread)
4875{
4876 int rc = VINF_SUCCESS;
4877
4878 /* Check if transmitter is enabled. */
4879 if (!(TCTL & TCTL_EN))
4880 return VINF_SUCCESS;
4881 /*
4882 * Grab the xmit lock of the driver as well as the E1K device state.
4883 */
4884 PPDMINETWORKUP pDrv = pState->CTX_SUFF(pDrv);
4885 if (pDrv)
4886 {
4887 rc = pDrv->pfnBeginXmit(pDrv, fOnWorkerThread);
4888 if (RT_FAILURE(rc))
4889 return rc;
4890 }
4891
4892 /*
4893 * Process all pending descriptors.
4894 * Note! Do not process descriptors in locked state
4895 */
4896 rc = e1kCsTxEnter(pState, VERR_SEM_BUSY);
4897 if (RT_LIKELY(rc == VINF_SUCCESS))
4898 {
4899 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatTransmit), a);
4900 /*
4901 * fIncomplete is set whenever we try to fetch additional descriptors
4902          * for an incomplete packet. If we fail to locate a complete packet on
4903          * the next iteration we need to reset the cache or we risk getting
4904          * stuck in this loop forever.
4905 */
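        /*
         * Example scenario: if the cache is full of descriptors none of which
         * carries EOP, e1kLocateTxPacket() keeps returning false and no more
         * descriptors can be fetched; without this guard we would spin here
         * forever re-fetching the same descriptors.
         */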
4906 bool fIncomplete = false;
4907 while (!pState->fLocked && e1kTxDLazyLoad(pState))
4908 {
4909 while (e1kLocateTxPacket(pState))
4910 {
4911 fIncomplete = false;
4912 /* Found a complete packet, allocate it. */
4913 rc = e1kXmitAllocBuf(pState, pState->fGSO);
4914 /* If we're out of bandwidth we'll come back later. */
4915 if (RT_FAILURE(rc))
4916 goto out;
4917 /* Copy the packet to allocated buffer and send it. */
4918 rc = e1kXmitPacket(pState, fOnWorkerThread);
4919 /* If we're out of bandwidth we'll come back later. */
4920 if (RT_FAILURE(rc))
4921 goto out;
4922 }
4923 uint8_t u8Remain = pState->nTxDFetched - pState->iTxDCurrent;
4924 if (RT_UNLIKELY(fIncomplete))
4925 {
4926 /*
4927 * The descriptor cache is full, but we were unable to find
4928 * a complete packet in it. Drop the cache and hope that
4929                      * the guest driver can recover from the network card error.
4930 */
4931 LogRel(("%s No complete packets in%s TxD cache! "
4932 "Fetched=%d, current=%d, TX len=%d.\n",
4933 INSTANCE(pState),
4934 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
4935 pState->nTxDFetched, pState->iTxDCurrent,
4936 e1kGetTxLen(pState)));
4937 Log4(("%s No complete packets in%s TxD cache! "
4938 "Fetched=%d, current=%d, TX len=%d. Dump follows:\n",
4939 INSTANCE(pState),
4940 u8Remain == E1K_TXD_CACHE_SIZE ? " full" : "",
4941 pState->nTxDFetched, pState->iTxDCurrent,
4942 e1kGetTxLen(pState)));
4943 e1kDumpTxDCache(pState);
4944 pState->iTxDCurrent = pState->nTxDFetched = 0;
4945 rc = VERR_NET_IO_ERROR;
4946 goto out;
4947 }
4948 if (u8Remain > 0)
4949 {
4950 Log4(("%s Incomplete packet at %d. Already fetched %d, "
4951 "%d more are available\n",
4952 INSTANCE(pState), pState->iTxDCurrent, u8Remain,
4953 e1kGetTxLen(pState) - u8Remain));
4954
4955 /*
4956 * A packet was partially fetched. Move incomplete packet to
4957 * the beginning of cache buffer, then load more descriptors.
4958 */
4959 memmove(pState->aTxDescriptors,
4960 &pState->aTxDescriptors[pState->iTxDCurrent],
4961 u8Remain * sizeof(E1KTXDESC));
4962 pState->iTxDCurrent = 0;
4963 pState->nTxDFetched = u8Remain;
4964 e1kTxDLoadMore(pState);
4965 fIncomplete = true;
4966 }
4967 else
4968 pState->nTxDFetched = 0;
4969 pState->iTxDCurrent = 0;
4970 }
4971 if (!pState->fLocked && GET_BITS(TXDCTL, LWTHRESH) == 0)
4972 {
4973 E1kLog2(("%s Out of transmit descriptors, raise ICR.TXD_LOW\n",
4974 INSTANCE(pState)));
4975 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_TXD_LOW);
4976 }
4977out:
4978 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatTransmit), a);
4979
4980 /// @todo: uncomment: pState->uStatIntTXQE++;
4981 /// @todo: uncomment: e1kRaiseInterrupt(pState, ICR_TXQE);
4982
4983 e1kCsTxLeave(pState);
4984 }
4985
4986
4987 /*
4988 * Release the lock.
4989 */
4990 if (pDrv)
4991 pDrv->pfnEndXmit(pDrv);
4992 return rc;
4993}
4994#endif /* E1K_WITH_TXD_CACHE */
4995
4996#ifdef IN_RING3
4997
4998/**
4999 * @interface_method_impl{PDMINETWORKDOWN,pfnXmitPending}
5000 */
5001static DECLCALLBACK(void) e1kNetworkDown_XmitPending(PPDMINETWORKDOWN pInterface)
5002{
5003 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5004 /* Resume suspended transmission */
5005 STATUS &= ~STATUS_TXOFF;
5006 e1kXmitPending(pState, true /*fOnWorkerThread*/);
5007}
5008
5009/**
5010 * Callback for consuming from transmit queue. It gets called in R3 whenever
5011 * we enqueue something in R0/GC.
5012 *
5013 * @returns true
5014 * @param pDevIns Pointer to device instance structure.
5015 * @param pItem Pointer to the element being dequeued (not used).
5016 * @thread ???
5017 */
5018static DECLCALLBACK(bool) e1kTxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5019{
5020 NOREF(pItem);
5021 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5022 E1kLog2(("%s e1kTxQueueConsumer:\n", INSTANCE(pState)));
5023
5024 int rc = e1kXmitPending(pState, false /*fOnWorkerThread*/);
5025 AssertMsg(RT_SUCCESS(rc) || rc == VERR_TRY_AGAIN, ("%Rrc\n", rc));
5026
5027 return true;
5028}
5029
5030/**
5031 * Handler for the wakeup signaller queue.
5032 */
5033static DECLCALLBACK(bool) e1kCanRxQueueConsumer(PPDMDEVINS pDevIns, PPDMQUEUEITEMCORE pItem)
5034{
5035 e1kWakeupReceive(pDevIns);
5036 return true;
5037}
5038
5039#endif /* IN_RING3 */
5040
5041/**
5042 * Write handler for Transmit Descriptor Tail register.
5043 *
5044 * @param pState The device state structure.
5045 * @param offset Register offset in memory-mapped frame.
5046 * @param index Register index in register array.
5047 * @param value The value to store.
5049 * @thread EMT
5050 */
5051static int e1kRegWriteTDT(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5052{
5053 int rc = e1kRegWriteDefault(pState, offset, index, value);
5054
5055 /* All descriptors starting with head and not including tail belong to us. */
5056 /* Process them. */
5057 E1kLog2(("%s e1kRegWriteTDT: TDBAL=%08x, TDBAH=%08x, TDLEN=%08x, TDH=%08x, TDT=%08x\n",
5058 INSTANCE(pState), TDBAL, TDBAH, TDLEN, TDH, TDT));
5059
5060 /* Ignore TDT writes when the link is down. */
5061 if (TDH != TDT && (STATUS & STATUS_LU))
5062 {
5063 E1kLogRel(("E1000: TDT write: %d descriptors to process\n", e1kGetTxLen(pState)));
5064 E1kLog(("%s e1kRegWriteTDT: %d descriptors to process\n",
5065 INSTANCE(pState), e1kGetTxLen(pState)));
5066
5067 /* Transmit pending packets if possible, defer it if we cannot do it
5068 in the current context. */
5069# ifndef IN_RING3
5070 if (!pState->CTX_SUFF(pDrv))
5071 {
5072 PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pState->CTX_SUFF(pTxQueue));
5073             if (RT_LIKELY(pItem))
5074 PDMQueueInsert(pState->CTX_SUFF(pTxQueue), pItem);
5075 }
5076 else
5077# endif
5078 {
5079 rc = e1kXmitPending(pState, false /*fOnWorkerThread*/);
5080 if (rc == VERR_TRY_AGAIN)
5081 rc = VINF_SUCCESS;
5082 else if (rc == VERR_SEM_BUSY)
5083 rc = VINF_IOM_R3_IOPORT_WRITE;
5084 AssertRC(rc);
5085 }
5086 }
5087
5088 return rc;
5089}
5090
5091/**
5092 * Write handler for Multicast Table Array registers.
5093 *
5094 * @param pState The device state structure.
5095 * @param offset Register offset in memory-mapped frame.
5096 * @param index Register index in register array.
5097 * @param value The value to store.
5098 * @thread EMT
5099 */
5100static int e1kRegWriteMTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5101{
5102 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->auMTA), VERR_DEV_IO_ERROR);
5103 pState->auMTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auMTA[0])] = value;
5104
5105 return VINF_SUCCESS;
5106}
5107
5108/**
5109 * Read handler for Multicast Table Array registers.
5110 *
5111 * @returns VBox status code.
5112 *
5113 * @param pState The device state structure.
5114 * @param offset Register offset in memory-mapped frame.
5115 * @param index Register index in register array.
5116 * @thread EMT
5117 */
5118static int e1kRegReadMTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5119{
5120     AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->auMTA), VERR_DEV_IO_ERROR);
5121 *pu32Value = pState->auMTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auMTA[0])];
5122
5123 return VINF_SUCCESS;
5124}
5125
5126/**
5127 * Write handler for Receive Address registers.
5128 *
5129 * @param pState The device state structure.
5130 * @param offset Register offset in memory-mapped frame.
5131 * @param index Register index in register array.
5132 * @param value The value to store.
5133 * @thread EMT
5134 */
5135static int e1kRegWriteRA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5136{
5137 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->aRecAddr.au32), VERR_DEV_IO_ERROR);
5138 pState->aRecAddr.au32[(offset - s_e1kRegMap[index].offset)/sizeof(pState->aRecAddr.au32[0])] = value;
5139
5140 return VINF_SUCCESS;
5141}
5142
5143/**
5144 * Read handler for Receive Address registers.
5145 *
5146 * @returns VBox status code.
5147 *
5148 * @param pState The device state structure.
5149 * @param offset Register offset in memory-mapped frame.
5150 * @param index Register index in register array.
5151 * @thread EMT
5152 */
5153static int e1kRegReadRA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5154{
5155     AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->aRecAddr.au32), VERR_DEV_IO_ERROR);
5156 *pu32Value = pState->aRecAddr.au32[(offset - s_e1kRegMap[index].offset)/sizeof(pState->aRecAddr.au32[0])];
5157
5158 return VINF_SUCCESS;
5159}
5160
5161/**
5162 * Write handler for VLAN Filter Table Array registers.
5163 *
5164 * @param pState The device state structure.
5165 * @param offset Register offset in memory-mapped frame.
5166 * @param index Register index in register array.
5167 * @param value The value to store.
5168 * @thread EMT
5169 */
5170static int e1kRegWriteVFTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5171{
5172 AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->auVFTA), VINF_SUCCESS);
5173 pState->auVFTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auVFTA[0])] = value;
5174
5175 return VINF_SUCCESS;
5176}
5177
5178/**
5179 * Read handler for VLAN Filter Table Array registers.
5180 *
5181 * @returns VBox status code.
5182 *
5183 * @param pState The device state structure.
5184 * @param offset Register offset in memory-mapped frame.
5185 * @param index Register index in register array.
5186 * @thread EMT
5187 */
5188static int e1kRegReadVFTA(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5189{
5190     AssertReturn(offset - s_e1kRegMap[index].offset < sizeof(pState->auVFTA), VERR_DEV_IO_ERROR);
5191 *pu32Value = pState->auVFTA[(offset - s_e1kRegMap[index].offset)/sizeof(pState->auVFTA[0])];
5192
5193 return VINF_SUCCESS;
5194}
5195
5196/**
5197 * Read handler for unimplemented registers.
5198 *
5199 * Merely reports reads from unimplemented registers.
5200 *
5201 * @returns VBox status code.
5202 *
5203 * @param pState The device state structure.
5204 * @param offset Register offset in memory-mapped frame.
5205 * @param index Register index in register array.
5206 * @thread EMT
5207 */
5208
5209static int e1kRegReadUnimplemented(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5210{
5211 E1kLog(("%s At %08X read (00000000) attempt from unimplemented register %s (%s)\n",
5212 INSTANCE(pState), offset, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5213 *pu32Value = 0;
5214
5215 return VINF_SUCCESS;
5216}
5217
5218/**
5219 * Default register read handler with automatic clear operation.
5220 *
5221 * Retrieves the value of register from register array in device state structure.
5222  * Then clears the register, implementing read-to-clear semantics.
5223 *
5224  * @remarks Masking and shifting of the result are done by the caller
5225  *          (e1kRegRead), so the handler returns the full register value.
5226 *
5227 * @returns VBox status code.
5228 *
5229 * @param pState The device state structure.
5230 * @param offset Register offset in memory-mapped frame.
5231 * @param index Register index in register array.
5232 * @thread EMT
5233 */
5234
5235static int e1kRegReadAutoClear(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5236{
5237 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5238 int rc = e1kRegReadDefault(pState, offset, index, pu32Value);
5239 pState->auRegs[index] = 0;
5240
5241 return rc;
5242}
5243
5244/**
5245 * Default register read handler.
5246 *
5247 * Retrieves the value of register from register array in device state structure.
5248 * Bits corresponding to 0s in 'readable' mask will always read as 0s.
5249 *
5250  * @remarks Masking and shifting of the result are done by the caller
5251  *          (e1kRegRead), so the handler returns the full register value.
5252 *
5253 * @returns VBox status code.
5254 *
5255 * @param pState The device state structure.
5256 * @param offset Register offset in memory-mapped frame.
5257 * @param index Register index in register array.
5258 * @thread EMT
5259 */
5260
5261static int e1kRegReadDefault(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t *pu32Value)
5262{
5263 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5264 *pu32Value = pState->auRegs[index] & s_e1kRegMap[index].readable;
5265
5266 return VINF_SUCCESS;
5267}
5268
5269/**
5270 * Write handler for unimplemented registers.
5271 *
5272 * Merely reports writes to unimplemented registers.
5273 *
5274 * @param pState The device state structure.
5275 * @param offset Register offset in memory-mapped frame.
5276 * @param index Register index in register array.
5277 * @param value The value to store.
5278 * @thread EMT
5279 */
5280
5281 static int e1kRegWriteUnimplemented(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5282{
5283 E1kLog(("%s At %08X write attempt (%08X) to unimplemented register %s (%s)\n",
5284 INSTANCE(pState), offset, value, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5285
5286 return VINF_SUCCESS;
5287}
5288
5289/**
5290 * Default register write handler.
5291 *
5292  * Stores the value to the register array in the device state structure. Only
5293  * bits corresponding to 1s in the 'writable' mask will be stored.
5294 *
5295 * @returns VBox status code.
5296 *
5297 * @param pState The device state structure.
5298 * @param offset Register offset in memory-mapped frame.
5299 * @param index Register index in register array.
5300 * @param value The value to store.
5302 * @thread EMT
5303 */
5304
5305static int e1kRegWriteDefault(E1KSTATE* pState, uint32_t offset, uint32_t index, uint32_t value)
5306{
5307 AssertReturn(index < E1K_NUM_OF_32BIT_REGS, VERR_DEV_IO_ERROR);
5308 pState->auRegs[index] = (value & s_e1kRegMap[index].writable) |
5309 (pState->auRegs[index] & ~s_e1kRegMap[index].writable);
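    /*
     * Illustrative example: with writable = 0x000000FF a write of 0x12345678 to a
     * register currently holding 0xAAAAAAAA stores (0x12345678 & 0xFF) |
     * (0xAAAAAAAA & ~0xFF) = 0xAAAAAA78, i.e. the read-only bits are preserved.
     */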
5310
5311 return VINF_SUCCESS;
5312}
5313
5314/**
5315 * Search register table for matching register.
5316 *
5317 * @returns Index in the register table or -1 if not found.
5318 *
5319 * @param pState The device state structure.
5320 * @param uOffset Register offset in memory-mapped region.
5321 * @thread EMT
5322 */
5323static int e1kRegLookup(E1KSTATE *pState, uint32_t uOffset)
5324{
5325 int index;
5326
5327 for (index = 0; index < E1K_NUM_OF_REGS; index++)
5328 {
5329 if (s_e1kRegMap[index].offset <= uOffset && uOffset < s_e1kRegMap[index].offset + s_e1kRegMap[index].size)
5330 {
5331 return index;
5332 }
5333 }
5334
5335 return -1;
5336}
5337
5338/**
5339 * Handle register read operation.
5340 *
5341 * Looks up and calls appropriate handler.
5342 *
5343 * @returns VBox status code.
5344 *
5345 * @param pState The device state structure.
5346 * @param uOffset Register offset in memory-mapped frame.
5347 * @param pv Where to store the result.
5348 * @param cb Number of bytes to read.
5349 * @thread EMT
5350 */
5351static int e1kRegRead(E1KSTATE *pState, uint32_t uOffset, void *pv, uint32_t cb)
5352{
5353 uint32_t u32 = 0;
5354 uint32_t mask = 0;
5355 uint32_t shift;
5356 int rc = VINF_SUCCESS;
5357 int index = e1kRegLookup(pState, uOffset);
5358 const char *szInst = INSTANCE(pState);
5359#ifdef DEBUG
5360 char buf[9];
5361#endif
5362
5363 /*
5364 * From the spec:
5365 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5366 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5367 */
5368
5369 /*
5370      * To be able to read bytes and short words we convert them
5371 * to properly shifted 32-bit words and masks. The idea is
5372 * to keep register-specific handlers simple. Most accesses
5373 * will be 32-bit anyway.
5374 */
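    /*
     * Worked example: a 2-byte read at offset +2 within a register yields
     * shift = 16 and mask = 0xFFFF0000; the handler's full 32-bit value is
     * masked and then shifted right by 16, so the caller gets the upper
     * half-word in the low bits of the result.
     */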
5375 switch (cb)
5376 {
5377 case 1: mask = 0x000000FF; break;
5378 case 2: mask = 0x0000FFFF; break;
5379 case 4: mask = 0xFFFFFFFF; break;
5380 default:
5381 return PDMDevHlpDBGFStop(pState->CTX_SUFF(pDevIns), RT_SRC_POS,
5382 "%s e1kRegRead: unsupported op size: offset=%#10x cb=%#10x\n",
5383 szInst, uOffset, cb);
5384 }
5385 if (index != -1)
5386 {
5387 if (s_e1kRegMap[index].readable)
5388 {
5389 /* Make the mask correspond to the bits we are about to read. */
5390 shift = (uOffset - s_e1kRegMap[index].offset) % sizeof(uint32_t) * 8;
5391 mask <<= shift;
5392 if (!mask)
5393 return PDMDevHlpDBGFStop(pState->CTX_SUFF(pDevIns), RT_SRC_POS,
5394 "%s e1kRegRead: Zero mask: offset=%#10x cb=%#10x\n",
5395 szInst, uOffset, cb);
5396 /*
5397 * Read it. Pass the mask so the handler knows what has to be read.
5398 * Mask out irrelevant bits.
5399 */
5400 //rc = e1kCsEnter(pState, VERR_SEM_BUSY, RT_SRC_POS);
5401 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5402 return rc;
5403 //pState->fDelayInts = false;
5404 //pState->iStatIntLost += pState->iStatIntLostOne;
5405 //pState->iStatIntLostOne = 0;
5406 rc = s_e1kRegMap[index].pfnRead(pState, uOffset & 0xFFFFFFFC, index, &u32);
5407 u32 &= mask;
5408 //e1kCsLeave(pState);
5409 E1kLog2(("%s At %08X read %s from %s (%s)\n",
5410 szInst, uOffset, e1kU32toHex(u32, mask, buf), s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5411 /* Shift back the result. */
5412 u32 >>= shift;
5413 }
5414 else
5415 {
5416 E1kLog(("%s At %08X read (%s) attempt from write-only register %s (%s)\n",
5417 szInst, uOffset, e1kU32toHex(u32, mask, buf), s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5418 }
5419 }
5420 else
5421 {
5422 E1kLog(("%s At %08X read (%s) attempt from non-existing register\n",
5423 szInst, uOffset, e1kU32toHex(u32, mask, buf)));
5424 }
5425
5426 memcpy(pv, &u32, cb);
5427 return rc;
5428}
5429
5430/**
5431 * Handle register write operation.
5432 *
5433 * Looks up and calls appropriate handler.
5434 *
5435 * @returns VBox status code.
5436 *
5437 * @param pState The device state structure.
5438 * @param uOffset Register offset in memory-mapped frame.
5439 * @param pv Where to fetch the value.
5440 * @param cb Number of bytes to write.
5441 * @thread EMT
5442 */
5443static int e1kRegWrite(E1KSTATE *pState, uint32_t uOffset, void const *pv, unsigned cb)
5444{
5445 int rc = VINF_SUCCESS;
5446 int index = e1kRegLookup(pState, uOffset);
5447 uint32_t u32;
5448
5449 /*
5450 * From the spec:
5451 * For registers that should be accessed as 32-bit double words, partial writes (less than a 32-bit
5452 * double word) is ignored. Partial reads return all 32 bits of data regardless of the byte enables.
5453 */
5454
5455 if (cb != 4)
5456 {
5457 E1kLog(("%s e1kRegWrite: Spec violation: unsupported op size: offset=%#10x cb=%#10x, ignored.\n",
5458 INSTANCE(pState), uOffset, cb));
5459 return VINF_SUCCESS;
5460 }
5461 if (uOffset & 3)
5462 {
5463 E1kLog(("%s e1kRegWrite: Spec violation: misaligned offset: %#10x cb=%#10x, ignored.\n",
5464 INSTANCE(pState), uOffset, cb));
5465 return VINF_SUCCESS;
5466 }
5467 u32 = *(uint32_t*)pv;
5468 if (index != -1)
5469 {
5470 if (s_e1kRegMap[index].writable)
5471 {
5472 /*
5473 * Write it. Pass the mask so the handler knows what has to be written.
5474 * Mask out irrelevant bits.
5475 */
5476 E1kLog2(("%s At %08X write %08X to %s (%s)\n",
5477 INSTANCE(pState), uOffset, u32, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5478 //rc = e1kCsEnter(pState, VERR_SEM_BUSY, RT_SRC_POS);
5479 if (RT_UNLIKELY(rc != VINF_SUCCESS))
5480 return rc;
5481 //pState->fDelayInts = false;
5482 //pState->iStatIntLost += pState->iStatIntLostOne;
5483 //pState->iStatIntLostOne = 0;
5484 rc = s_e1kRegMap[index].pfnWrite(pState, uOffset, index, u32);
5485 //e1kCsLeave(pState);
5486 }
5487 else
5488 {
5489 E1kLog(("%s At %08X write attempt (%08X) to read-only register %s (%s)\n",
5490 INSTANCE(pState), uOffset, u32, s_e1kRegMap[index].abbrev, s_e1kRegMap[index].name));
5491 }
5492 }
5493 else
5494 {
5495 E1kLog(("%s At %08X write attempt (%08X) to non-existing register\n",
5496 INSTANCE(pState), uOffset, u32));
5497 }
5498 return rc;
5499}
5500
5501/**
5502 * I/O handler for memory-mapped read operations.
5503 *
5504 * @returns VBox status code.
5505 *
5506 * @param pDevIns The device instance.
5507 * @param pvUser User argument.
5508 * @param GCPhysAddr Physical address (in GC) where the read starts.
5509 * @param pv Where to store the result.
5510 * @param cb Number of bytes read.
5511 * @thread EMT
5512 */
5513PDMBOTHCBDECL(int) e1kMMIORead(PPDMDEVINS pDevIns, void *pvUser,
5514 RTGCPHYS GCPhysAddr, void *pv, unsigned cb)
5515{
5516 NOREF(pvUser);
5517 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5518 uint32_t uOffset = GCPhysAddr - pState->addrMMReg;
5519 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatMMIORead), a);
5520
5521 Assert(uOffset < E1K_MM_SIZE);
5522
5523 int rc = e1kRegRead(pState, uOffset, pv, cb);
5524 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatMMIORead), a);
5525 return rc;
5526}
5527
5528/**
5529 * Memory mapped I/O Handler for write operations.
5530 *
5531 * @returns VBox status code.
5532 *
5533 * @param pDevIns The device instance.
5534 * @param pvUser User argument.
5535  * @param GCPhysAddr Physical address (in GC) where the write starts.
5536 * @param pv Where to fetch the value.
5537 * @param cb Number of bytes to write.
5538 * @thread EMT
5539 */
5540PDMBOTHCBDECL(int) e1kMMIOWrite(PPDMDEVINS pDevIns, void *pvUser,
5541 RTGCPHYS GCPhysAddr, void const *pv, unsigned cb)
5542{
5543 NOREF(pvUser);
5544 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5545 uint32_t uOffset = GCPhysAddr - pState->addrMMReg;
5546 int rc;
5547 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatMMIOWrite), a);
5548
5549 Assert(uOffset < E1K_MM_SIZE);
5550 if (cb != 4)
5551 {
5552         E1kLog(("%s e1kMMIOWrite: invalid op size: offset=%#10x cb=%#10x\n", INSTANCE(pState), uOffset, cb));
5553 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "e1kMMIOWrite: invalid op size: offset=%#10x cb=%#10x\n", uOffset, cb);
5554 }
5555 else
5556 rc = e1kRegWrite(pState, uOffset, pv, cb);
5557
5558 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatMMIOWrite), a);
5559 return rc;
5560}
5561
5562/**
5563 * Port I/O Handler for IN operations.
5564 *
5565 * @returns VBox status code.
5566 *
5567 * @param pDevIns The device instance.
5568  * @param pvUser User argument (not used).
5569 * @param port Port number used for the IN operation.
5570 * @param pu32 Where to store the result.
5571 * @param cb Number of bytes read.
5572 * @thread EMT
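 * @note   Registers are accessed through a two-register window: the guest
 *         writes the target register offset (e.g. 0x08) to IOADDR at port
 *         offset 0 and then reads the register contents from IODATA at port
 *         offset 4. (The offset value is given for illustration only.)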
5573 */
5574PDMBOTHCBDECL(int) e1kIOPortIn(PPDMDEVINS pDevIns, void *pvUser,
5575 RTIOPORT port, uint32_t *pu32, unsigned cb)
5576{
5577 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5578 int rc = VINF_SUCCESS;
5579 const char *szInst = INSTANCE(pState);
5580 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatIORead), a);
5581
5582 port -= pState->addrIOPort;
5583 if (cb != 4)
5584 {
5585         E1kLog(("%s e1kIOPortIn: invalid op size: port=%RTiop cb=%08x\n", szInst, port, cb));
5586 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortIn: invalid op size: port=%RTiop cb=%08x\n", szInst, port, cb);
5587 }
5588 else
5589 switch (port)
5590 {
5591 case 0x00: /* IOADDR */
5592 *pu32 = pState->uSelectedReg;
5593 E1kLog2(("%s e1kIOPortIn: IOADDR(0), selecting register %#010x, val=%#010x\n", szInst, pState->uSelectedReg, *pu32));
5594 break;
5595 case 0x04: /* IODATA */
5596 rc = e1kRegRead(pState, pState->uSelectedReg, pu32, cb);
5597 /** @todo wrong return code triggers assertions in the debug build; fix please */
5598 if (rc == VINF_IOM_R3_MMIO_READ)
5599 rc = VINF_IOM_R3_IOPORT_READ;
5600
5601 E1kLog2(("%s e1kIOPortIn: IODATA(4), reading from selected register %#010x, val=%#010x\n", szInst, pState->uSelectedReg, *pu32));
5602 break;
5603 default:
5604 E1kLog(("%s e1kIOPortIn: invalid port %#010x\n", szInst, port));
5605 //*pRC = VERR_IOM_IOPORT_UNUSED;
5606 }
5607
5608 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatIORead), a);
5609 return rc;
5610}
5611
5612
5613/**
5614 * Port I/O Handler for OUT operations.
5615 *
5616 * @returns VBox status code.
5617 *
5618 * @param pDevIns The device instance.
5619 * @param pvUser User argument.
5620  * @param port Port number used for the OUT operation.
5621 * @param u32 The value to output.
5622 * @param cb The value size in bytes.
5623 * @thread EMT
5624 */
5625PDMBOTHCBDECL(int) e1kIOPortOut(PPDMDEVINS pDevIns, void *pvUser,
5626 RTIOPORT port, uint32_t u32, unsigned cb)
5627{
5628 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE *);
5629 int rc = VINF_SUCCESS;
5630 const char *szInst = INSTANCE(pState);
5631 STAM_PROFILE_ADV_START(&pState->CTX_SUFF_Z(StatIOWrite), a);
5632
5633 E1kLog2(("%s e1kIOPortOut: port=%RTiop value=%08x\n", szInst, port, u32));
5634 if (cb != 4)
5635 {
5636 E1kLog(("%s e1kIOPortOut: invalid op size: port=%RTiop cb=%08x\n", szInst, port, cb));
5637 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "%s e1kIOPortOut: invalid op size: port=%RTiop cb=%08x\n", szInst, port, cb);
5638 }
5639 else
5640 {
5641 port -= pState->addrIOPort;
5642 switch (port)
5643 {
5644 case 0x00: /* IOADDR */
5645 pState->uSelectedReg = u32;
5646 E1kLog2(("%s e1kIOPortOut: IOADDR(0), selected register %08x\n", szInst, pState->uSelectedReg));
5647 break;
5648 case 0x04: /* IODATA */
5649 E1kLog2(("%s e1kIOPortOut: IODATA(4), writing to selected register %#010x, value=%#010x\n", szInst, pState->uSelectedReg, u32));
5650 rc = e1kRegWrite(pState, pState->uSelectedReg, &u32, cb);
5651 /** @todo wrong return code triggers assertions in the debug build; fix please */
5652 if (rc == VINF_IOM_R3_MMIO_WRITE)
5653 rc = VINF_IOM_R3_IOPORT_WRITE;
5654 break;
5655 default:
5656 E1kLog(("%s e1kIOPortOut: invalid port %#010x\n", szInst, port));
5657 /** @todo Do we need to return an error here?
5658 * bird: VINF_SUCCESS is fine for unhandled cases of an OUT handler. (If you're curious
5659 * about the guest code and a bit adventuresome, try rc = PDMDeviceDBGFStop(...);) */
5660 rc = PDMDevHlpDBGFStop(pDevIns, RT_SRC_POS, "e1kIOPortOut: invalid port %#010x\n", port);
5661 }
5662 }
5663
5664 STAM_PROFILE_ADV_STOP(&pState->CTX_SUFF_Z(StatIOWrite), a);
5665 return rc;
5666}
5667
5668#ifdef IN_RING3
5669/**
5670 * Dump complete device state to log.
5671 *
5672 * @param pState Pointer to device state.
5673 */
5674static void e1kDumpState(E1KSTATE *pState)
5675{
5676     for (int i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
5677 {
5678 E1kLog2(("%s %8.8s = %08x\n", INSTANCE(pState),
5679 s_e1kRegMap[i].abbrev, pState->auRegs[i]));
5680 }
5681#ifdef E1K_INT_STATS
5682 LogRel(("%s Interrupt attempts: %d\n", INSTANCE(pState), pState->uStatIntTry));
5683 LogRel(("%s Interrupts raised : %d\n", INSTANCE(pState), pState->uStatInt));
5684 LogRel(("%s Interrupts lowered: %d\n", INSTANCE(pState), pState->uStatIntLower));
5685 LogRel(("%s Interrupts delayed: %d\n", INSTANCE(pState), pState->uStatIntDly));
5686 LogRel(("%s Disabled delayed: %d\n", INSTANCE(pState), pState->uStatDisDly));
5687 LogRel(("%s Interrupts skipped: %d\n", INSTANCE(pState), pState->uStatIntSkip));
5688 LogRel(("%s Masked interrupts : %d\n", INSTANCE(pState), pState->uStatIntMasked));
5689 LogRel(("%s Early interrupts : %d\n", INSTANCE(pState), pState->uStatIntEarly));
5690 LogRel(("%s Late interrupts : %d\n", INSTANCE(pState), pState->uStatIntLate));
5691 LogRel(("%s Lost interrupts : %d\n", INSTANCE(pState), pState->iStatIntLost));
5692 LogRel(("%s Interrupts by RX : %d\n", INSTANCE(pState), pState->uStatIntRx));
5693 LogRel(("%s Interrupts by TX : %d\n", INSTANCE(pState), pState->uStatIntTx));
5694 LogRel(("%s Interrupts by ICS : %d\n", INSTANCE(pState), pState->uStatIntICS));
5695 LogRel(("%s Interrupts by RDTR: %d\n", INSTANCE(pState), pState->uStatIntRDTR));
5696 LogRel(("%s Interrupts by RDMT: %d\n", INSTANCE(pState), pState->uStatIntRXDMT0));
5697 LogRel(("%s Interrupts by TXQE: %d\n", INSTANCE(pState), pState->uStatIntTXQE));
5698 LogRel(("%s TX int delay asked: %d\n", INSTANCE(pState), pState->uStatTxIDE));
5699 LogRel(("%s TX no report asked: %d\n", INSTANCE(pState), pState->uStatTxNoRS));
5700 LogRel(("%s TX abs timer expd : %d\n", INSTANCE(pState), pState->uStatTAD));
5701 LogRel(("%s TX int timer expd : %d\n", INSTANCE(pState), pState->uStatTID));
5702 LogRel(("%s RX abs timer expd : %d\n", INSTANCE(pState), pState->uStatRAD));
5703 LogRel(("%s RX int timer expd : %d\n", INSTANCE(pState), pState->uStatRID));
5704 LogRel(("%s TX CTX descriptors: %d\n", INSTANCE(pState), pState->uStatDescCtx));
5705 LogRel(("%s TX DAT descriptors: %d\n", INSTANCE(pState), pState->uStatDescDat));
5706 LogRel(("%s TX LEG descriptors: %d\n", INSTANCE(pState), pState->uStatDescLeg));
5707 LogRel(("%s Received frames : %d\n", INSTANCE(pState), pState->uStatRxFrm));
5708 LogRel(("%s Transmitted frames: %d\n", INSTANCE(pState), pState->uStatTxFrm));
5709#endif /* E1K_INT_STATS */
5710}
5711
5712/**
5713 * Map PCI I/O region.
5714 *
5715 * @return VBox status code.
5716 * @param pPciDev Pointer to PCI device. Use pPciDev->pDevIns to get the device instance.
5717 * @param iRegion The region number.
5718  * @param GCPhysAddress Physical address of the region. If enmType is PCI_ADDRESS_SPACE_IO, this is an
5719 * I/O port, else it's a physical address.
5720 * This address is *NOT* relative to pci_mem_base like earlier!
5721 * @param cb Region size.
5722 * @param enmType One of the PCI_ADDRESS_SPACE_* values.
5723 * @thread EMT
5724 */
5725static DECLCALLBACK(int) e1kMap(PPCIDEVICE pPciDev, int iRegion,
5726 RTGCPHYS GCPhysAddress, uint32_t cb, PCIADDRESSSPACE enmType)
5727{
5728 int rc;
5729 E1KSTATE *pState = PDMINS_2_DATA(pPciDev->pDevIns, E1KSTATE*);
5730
5731 switch (enmType)
5732 {
5733 case PCI_ADDRESS_SPACE_IO:
5734 pState->addrIOPort = (RTIOPORT)GCPhysAddress;
5735 rc = PDMDevHlpIOPortRegister(pPciDev->pDevIns, pState->addrIOPort, cb, 0,
5736 e1kIOPortOut, e1kIOPortIn, NULL, NULL, "E1000");
5737 if (RT_FAILURE(rc))
5738 break;
5739 if (pState->fR0Enabled)
5740 {
5741 rc = PDMDevHlpIOPortRegisterR0(pPciDev->pDevIns, pState->addrIOPort, cb, 0,
5742 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
5743 if (RT_FAILURE(rc))
5744 break;
5745 }
5746 if (pState->fGCEnabled)
5747 {
5748 rc = PDMDevHlpIOPortRegisterRC(pPciDev->pDevIns, pState->addrIOPort, cb, 0,
5749 "e1kIOPortOut", "e1kIOPortIn", NULL, NULL, "E1000");
5750 }
5751 break;
5752 case PCI_ADDRESS_SPACE_MEM:
5753 pState->addrMMReg = GCPhysAddress;
5754 rc = PDMDevHlpMMIORegister(pPciDev->pDevIns, GCPhysAddress, cb, NULL /*pvUser*/,
5755 IOMMMIO_FLAGS_READ_PASSTHRU | IOMMMIO_FLAGS_WRITE_PASSTHRU,
5756 e1kMMIOWrite, e1kMMIORead, "E1000");
5757 if (pState->fR0Enabled)
5758 {
5759 rc = PDMDevHlpMMIORegisterR0(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTR0PTR /*pvUser*/,
5760 "e1kMMIOWrite", "e1kMMIORead");
5761 if (RT_FAILURE(rc))
5762 break;
5763 }
5764 if (pState->fGCEnabled)
5765 {
5766 rc = PDMDevHlpMMIORegisterRC(pPciDev->pDevIns, GCPhysAddress, cb, NIL_RTRCPTR /*pvUser*/,
5767 "e1kMMIOWrite", "e1kMMIORead");
5768 }
5769 break;
5770 default:
5771 /* We should never get here */
5772 AssertMsgFailed(("Invalid PCI address space param in map callback"));
5773 rc = VERR_INTERNAL_ERROR;
5774 break;
5775 }
5776 return rc;
5777}
5778
5779/**
5780 * Check if the device can receive data now.
5781  * This must be called before the pfnReceive() method is called.
5782  *
5783  * @returns VINF_SUCCESS if the device can receive data, VERR_NET_NO_BUFFER_SPACE otherwise.
5784  * @param pState The device state structure.
5785 * @thread EMT
5786 */
5787static int e1kCanReceive(E1KSTATE *pState)
5788{
5789#ifndef E1K_WITH_RXD_CACHE
5790 size_t cb;
5791
5792 if (RT_UNLIKELY(e1kCsRxEnter(pState, VERR_SEM_BUSY) != VINF_SUCCESS))
5793 return VERR_NET_NO_BUFFER_SPACE;
5794
5795 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
5796 {
5797 E1KRXDESC desc;
5798 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
5799 &desc, sizeof(desc));
5800 if (desc.status.fDD)
5801 cb = 0;
5802 else
5803 cb = pState->u16RxBSize;
5804 }
5805 else if (RDH < RDT)
5806 cb = (RDT - RDH) * pState->u16RxBSize;
5807 else if (RDH > RDT)
5808 cb = (RDLEN/sizeof(E1KRXDESC) - RDH + RDT) * pState->u16RxBSize;
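    /*
     * Example of the wrap-around case: with RDLEN = 256 * sizeof(E1KRXDESC),
     * RDH = 200 and RDT = 10 there are 256 - 200 + 10 = 66 available descriptors,
     * i.e. 66 * u16RxBSize bytes of receive buffer space.
     */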
5809 else
5810 {
5811 cb = 0;
5812 E1kLogRel(("E1000: OUT of RX descriptors!\n"));
5813 }
5814 E1kLog2(("%s e1kCanReceive: at exit RDH=%d RDT=%d RDLEN=%d u16RxBSize=%d cb=%lu\n",
5815 INSTANCE(pState), RDH, RDT, RDLEN, pState->u16RxBSize, cb));
5816
5817 e1kCsRxLeave(pState);
5818 return cb > 0 ? VINF_SUCCESS : VERR_NET_NO_BUFFER_SPACE;
5819#else /* E1K_WITH_RXD_CACHE */
5820 int rc = VINF_SUCCESS;
5821
5822 if (RT_UNLIKELY(e1kCsRxEnter(pState, VERR_SEM_BUSY) != VINF_SUCCESS))
5823 return VERR_NET_NO_BUFFER_SPACE;
5824
5825 if (RT_UNLIKELY(RDLEN == sizeof(E1KRXDESC)))
5826 {
5827 E1KRXDESC desc;
5828 PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns), e1kDescAddr(RDBAH, RDBAL, RDH),
5829 &desc, sizeof(desc));
5830 if (desc.status.fDD)
5831 rc = VERR_NET_NO_BUFFER_SPACE;
5832 }
5833 else if (e1kRxDIsCacheEmpty(pState) && RDH == RDT)
5834 {
5835 /* Cache is empty, so is the RX ring. */
5836 rc = VERR_NET_NO_BUFFER_SPACE;
5837 }
5838 E1kLog2(("%s e1kCanReceive: at exit in_cache=%d RDH=%d RDT=%d RDLEN=%d"
5839 " u16RxBSize=%d rc=%Rrc\n", INSTANCE(pState),
5840 e1kRxDInCache(pState), RDH, RDT, RDLEN, pState->u16RxBSize, rc));
5841
5842 e1kCsRxLeave(pState);
5843 return rc;
5844#endif /* E1K_WITH_RXD_CACHE */
5845}
5846
5847/**
5848 * @interface_method_impl{PDMINETWORKDOWN,pfnWaitReceiveAvail}
5849 */
5850static DECLCALLBACK(int) e1kNetworkDown_WaitReceiveAvail(PPDMINETWORKDOWN pInterface, RTMSINTERVAL cMillies)
5851{
5852 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
5853 int rc = e1kCanReceive(pState);
5854
5855 if (RT_SUCCESS(rc))
5856 return VINF_SUCCESS;
5857 if (RT_UNLIKELY(cMillies == 0))
5858 return VERR_NET_NO_BUFFER_SPACE;
5859
5860 rc = VERR_INTERRUPTED;
5861 ASMAtomicXchgBool(&pState->fMaybeOutOfSpace, true);
5862 STAM_PROFILE_START(&pState->StatRxOverflow, a);
5863 VMSTATE enmVMState;
5864 while (RT_LIKELY( (enmVMState = PDMDevHlpVMState(pState->CTX_SUFF(pDevIns))) == VMSTATE_RUNNING
5865 || enmVMState == VMSTATE_RUNNING_LS))
5866 {
5867 int rc2 = e1kCanReceive(pState);
5868 if (RT_SUCCESS(rc2))
5869 {
5870 rc = VINF_SUCCESS;
5871 break;
5872 }
5873 E1kLogRel(("E1000 e1kNetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n",
5874 cMillies));
5875 E1kLog(("%s e1kNetworkDown_WaitReceiveAvail: waiting cMillies=%u...\n",
5876 INSTANCE(pState), cMillies));
5877 RTSemEventWait(pState->hEventMoreRxDescAvail, cMillies);
5878 }
5879 STAM_PROFILE_STOP(&pState->StatRxOverflow, a);
5880 ASMAtomicXchgBool(&pState->fMaybeOutOfSpace, false);
5881
5882 return rc;
5883}
5884
5885
5886/**
5887  * Matches the packet addresses against the Receive Address table. Looks for
5888 * exact matches only.
5889 *
5890 * @returns true if address matches.
5891 * @param pState Pointer to the state structure.
5892 * @param pvBuf The ethernet packet.
5894 * @thread EMT
5895 */
5896static bool e1kPerfectMatch(E1KSTATE *pState, const void *pvBuf)
5897{
5898 for (unsigned i = 0; i < RT_ELEMENTS(pState->aRecAddr.array); i++)
5899 {
5900 E1KRAELEM* ra = pState->aRecAddr.array + i;
5901
5902 /* Valid address? */
5903 if (ra->ctl & RA_CTL_AV)
5904 {
5905 Assert((ra->ctl & RA_CTL_AS) < 2);
5906 //unsigned char *pAddr = (unsigned char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS);
5907 //E1kLog3(("%s Matching %02x:%02x:%02x:%02x:%02x:%02x against %02x:%02x:%02x:%02x:%02x:%02x...\n",
5908 // INSTANCE(pState), pAddr[0], pAddr[1], pAddr[2], pAddr[3], pAddr[4], pAddr[5],
5909 // ra->addr[0], ra->addr[1], ra->addr[2], ra->addr[3], ra->addr[4], ra->addr[5]));
5910 /*
5911 * Address Select:
5912 * 00b = Destination address
5913 * 01b = Source address
5914 * 10b = Reserved
5915 * 11b = Reserved
5916 * Since ethernet header is (DA, SA, len) we can use address
5917 * select as index.
5918 */
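            /*
             * For instance, AS = 01b gives an offset of sizeof(ra->addr) * 1 = 6
             * bytes into the frame, i.e. the source address field is compared.
             */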
5919 if (memcmp((char*)pvBuf + sizeof(ra->addr)*(ra->ctl & RA_CTL_AS),
5920 ra->addr, sizeof(ra->addr)) == 0)
5921 return true;
5922 }
5923 }
5924
5925 return false;
5926}
5927
5928/**
5929 * Matches the packet addresses against Multicast Table Array.
5930 *
5931  * @remarks This is an imperfect match since it matches a subset of addresses
5932  *          rather than the exact address.
5933 *
5934 * @returns true if address matches.
5935 * @param pState Pointer to the state structure.
5936 * @param pvBuf The ethernet packet.
5938 * @thread EMT
5939 */
5940static bool e1kImperfectMatch(E1KSTATE *pState, const void *pvBuf)
5941{
5942 /* Get bits 32..47 of destination address */
5943 uint16_t u16Bit = ((uint16_t*)pvBuf)[2];
5944
5945 unsigned offset = GET_BITS(RCTL, MO);
5946 /*
5947 * offset means:
5948 * 00b = bits 36..47
5949 * 01b = bits 35..46
5950 * 10b = bits 34..45
5951 * 11b = bits 32..43
5952 */
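    /*
     * Worked example (arbitrary value): with u16Bit = 0xA7C3 and MO = 0 the
     * value is shifted right by 4 giving 0x0A7C, so bit 2684 of the 4096-bit
     * MTA decides whether the packet passes the multicast filter.
     */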
5953 if (offset < 3)
5954 u16Bit = u16Bit >> (4 - offset);
5955 return ASMBitTest(pState->auMTA, u16Bit & 0xFFF);
5956}
5957
5958/**
5959  * Determines if the packet is to be delivered to the upper layer. The following
5960  * filters are supported:
5961 * - Exact Unicast/Multicast
5962 * - Promiscuous Unicast/Multicast
5963 * - Multicast
5964 * - VLAN
5965 *
5966 * @returns true if packet is intended for this node.
5967 * @param pState Pointer to the state structure.
5968 * @param pvBuf The ethernet packet.
5969 * @param cb Number of bytes available in the packet.
5970 * @param pStatus Bit field to store status bits.
5971 * @thread EMT
5972 */
5973static bool e1kAddressFilter(E1KSTATE *pState, const void *pvBuf, size_t cb, E1KRXDST *pStatus)
5974{
5975 Assert(cb > 14);
5976 /* Assume that we fail to pass exact filter. */
5977 pStatus->fPIF = false;
5978 pStatus->fVP = false;
5979 /* Discard oversized packets */
5980 if (cb > E1K_MAX_RX_PKT_SIZE)
5981 {
5982 E1kLog(("%s ERROR: Incoming packet is too big, cb=%d > max=%d\n",
5983 INSTANCE(pState), cb, E1K_MAX_RX_PKT_SIZE));
5984 E1K_INC_CNT32(ROC);
5985 return false;
5986 }
5987 else if (!(RCTL & RCTL_LPE) && cb > 1522)
5988 {
5989         /* When long packet reception is disabled, packets over 1522 bytes are discarded. */
5990 E1kLog(("%s Discarding incoming packet (LPE=0), cb=%d\n",
5991 INSTANCE(pState), cb));
5992 E1K_INC_CNT32(ROC);
5993 return false;
5994 }
5995
5996 uint16_t *u16Ptr = (uint16_t*)pvBuf;
5997 /* Compare TPID with VLAN Ether Type */
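    /* For a tagged frame bytes 12-13 carry the TPID (compared against VET,
     * typically 0x8100) and bytes 14-15 the TCI, which is why u16Ptr[6] and
     * u16Ptr[7] are examined below. */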
5998 if (RT_BE2H_U16(u16Ptr[6]) == VET)
5999 {
6000 pStatus->fVP = true;
6001 /* Is VLAN filtering enabled? */
6002 if (RCTL & RCTL_VFE)
6003 {
6004 /* It is 802.1q packet indeed, let's filter by VID */
6005 if (RCTL & RCTL_CFIEN)
6006 {
6007 E1kLog3(("%s VLAN filter: VLAN=%d CFI=%d RCTL_CFI=%d\n", INSTANCE(pState),
6008 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7])),
6009 E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])),
6010 !!(RCTL & RCTL_CFI)));
6011 if (E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])) != !!(RCTL & RCTL_CFI))
6012 {
6013 E1kLog2(("%s Packet filter: CFIs do not match in packet and RCTL (%d!=%d)\n",
6014 INSTANCE(pState), E1K_SPEC_CFI(RT_BE2H_U16(u16Ptr[7])), !!(RCTL & RCTL_CFI)));
6015 return false;
6016 }
6017 }
6018 else
6019 E1kLog3(("%s VLAN filter: VLAN=%d\n", INSTANCE(pState),
6020 E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6021 if (!ASMBitTest(pState->auVFTA, E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))))
6022 {
6023 E1kLog2(("%s Packet filter: no VLAN match (id=%d)\n",
6024 INSTANCE(pState), E1K_SPEC_VLAN(RT_BE2H_U16(u16Ptr[7]))));
6025 return false;
6026 }
6027 }
6028 }
6029 /* Broadcast filtering */
6030 if (e1kIsBroadcast(pvBuf) && (RCTL & RCTL_BAM))
6031 return true;
6032 E1kLog2(("%s Packet filter: not a broadcast\n", INSTANCE(pState)));
6033 if (e1kIsMulticast(pvBuf))
6034 {
6035 /* Is multicast promiscuous enabled? */
6036 if (RCTL & RCTL_MPE)
6037 return true;
6038 E1kLog2(("%s Packet filter: no promiscuous multicast\n", INSTANCE(pState)));
6039 /* Try perfect matches first */
6040 if (e1kPerfectMatch(pState, pvBuf))
6041 {
6042 pStatus->fPIF = true;
6043 return true;
6044 }
6045 E1kLog2(("%s Packet filter: no perfect match\n", INSTANCE(pState)));
6046 if (e1kImperfectMatch(pState, pvBuf))
6047 return true;
6048 E1kLog2(("%s Packet filter: no imperfect match\n", INSTANCE(pState)));
6049 }
6050 else {
6051 /* Is unicast promiscuous enabled? */
6052 if (RCTL & RCTL_UPE)
6053 return true;
6054 E1kLog2(("%s Packet filter: no promiscuous unicast\n", INSTANCE(pState)));
6055 if (e1kPerfectMatch(pState, pvBuf))
6056 {
6057 pStatus->fPIF = true;
6058 return true;
6059 }
6060 E1kLog2(("%s Packet filter: no perfect match\n", INSTANCE(pState)));
6061 }
6062 E1kLog2(("%s Packet filter: packet discarded\n", INSTANCE(pState)));
6063 return false;
6064}
6065
6066/**
6067 * @interface_method_impl{PDMINETWORKDOWN,pfnReceive}
6068 */
6069static DECLCALLBACK(int) e1kNetworkDown_Receive(PPDMINETWORKDOWN pInterface, const void *pvBuf, size_t cb)
6070{
6071 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkDown);
6072 int rc = VINF_SUCCESS;
6073
6074 /*
6075 * Drop packets if the VM is not running yet/anymore.
6076 */
6077 VMSTATE enmVMState = PDMDevHlpVMState(STATE_TO_DEVINS(pState));
6078 if ( enmVMState != VMSTATE_RUNNING
6079 && enmVMState != VMSTATE_RUNNING_LS)
6080 {
6081 E1kLog(("%s Dropping incoming packet as VM is not running.\n", INSTANCE(pState)));
6082 return VINF_SUCCESS;
6083 }
6084
6085 /* Discard incoming packets in locked state */
6086 if (!(RCTL & RCTL_EN) || pState->fLocked || !(STATUS & STATUS_LU))
6087 {
6088 E1kLog(("%s Dropping incoming packet as receive operation is disabled.\n", INSTANCE(pState)));
6089 return VINF_SUCCESS;
6090 }
6091
6092 STAM_PROFILE_ADV_START(&pState->StatReceive, a);
6093
6094 //if (!e1kCsEnter(pState, RT_SRC_POS))
6095 // return VERR_PERMISSION_DENIED;
6096
6097 e1kPacketDump(pState, (const uint8_t*)pvBuf, cb, "<-- Incoming");
6098
6099 /* Update stats */
6100 if (RT_LIKELY(e1kCsEnter(pState, VERR_SEM_BUSY) == VINF_SUCCESS))
6101 {
6102 E1K_INC_CNT32(TPR);
6103 E1K_ADD_CNT64(TORL, TORH, cb < 64? 64 : cb);
6104 e1kCsLeave(pState);
6105 }
6106 STAM_PROFILE_ADV_START(&pState->StatReceiveFilter, a);
6107 E1KRXDST status;
6108 RT_ZERO(status);
6109 bool fPassed = e1kAddressFilter(pState, pvBuf, cb, &status);
6110 STAM_PROFILE_ADV_STOP(&pState->StatReceiveFilter, a);
6111 if (fPassed)
6112 {
6113 rc = e1kHandleRxPacket(pState, pvBuf, cb, status);
6114 }
6115 //e1kCsLeave(pState);
6116 STAM_PROFILE_ADV_STOP(&pState->StatReceive, a);
6117
6118 return rc;
6119}
6120
6121/**
6122 * Gets the pointer to the status LED of a unit.
6123 *
6124 * @returns VBox status code.
6125 * @param pInterface Pointer to the interface structure.
6126 * @param iLUN The unit which status LED we desire.
6127 * @param ppLed Where to store the LED pointer.
6128 * @thread EMT
6129 */
6130static DECLCALLBACK(int) e1kQueryStatusLed(PPDMILEDPORTS pInterface, unsigned iLUN, PPDMLED *ppLed)
6131{
6132 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, ILeds);
6133 int rc = VERR_PDM_LUN_NOT_FOUND;
6134
6135 if (iLUN == 0)
6136 {
6137 *ppLed = &pState->led;
6138 rc = VINF_SUCCESS;
6139 }
6140 return rc;
6141}
6142
6143/**
6144 * Gets the current Media Access Control (MAC) address.
6145 *
6146 * @returns VBox status code.
6147 * @param pInterface Pointer to the interface structure containing the called function pointer.
6148 * @param pMac Where to store the MAC address.
6149 * @thread EMT
6150 */
6151static DECLCALLBACK(int) e1kGetMac(PPDMINETWORKCONFIG pInterface, PRTMAC pMac)
6152{
6153 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6154 pState->eeprom.getMac(pMac);
6155 return VINF_SUCCESS;
6156}
6157
6158
6159/**
6160  * Gets the current link state.
6161 *
6162 * @returns The current link state.
6163 * @param pInterface Pointer to the interface structure containing the called function pointer.
6164 * @thread EMT
6165 */
6166static DECLCALLBACK(PDMNETWORKLINKSTATE) e1kGetLinkState(PPDMINETWORKCONFIG pInterface)
6167{
6168 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6169 if (STATUS & STATUS_LU)
6170 return PDMNETWORKLINKSTATE_UP;
6171 return PDMNETWORKLINKSTATE_DOWN;
6172}
6173
6174
6175/**
6176 * Sets the new link state.
6177 *
6178 * @returns VBox status code.
6179 * @param pInterface Pointer to the interface structure containing the called function pointer.
6180 * @param enmState The new link state
6181 * @thread EMT
6182 */
6183static DECLCALLBACK(int) e1kSetLinkState(PPDMINETWORKCONFIG pInterface, PDMNETWORKLINKSTATE enmState)
6184{
6185 E1KSTATE *pState = RT_FROM_MEMBER(pInterface, E1KSTATE, INetworkConfig);
6186 bool fOldUp = !!(STATUS & STATUS_LU);
6187 bool fNewUp = enmState == PDMNETWORKLINKSTATE_UP;
6188
6189 if ( fNewUp != fOldUp
6190 || (!fNewUp && pState->fCableConnected)) /* old state was connected but STATUS not
6191 * yet written by guest */
6192 {
6193 if (fNewUp)
6194 {
6195 E1kLog(("%s Link will be up in approximately %d secs\n",
6196 INSTANCE(pState), pState->cMsLinkUpDelay / 1000));
6197 pState->fCableConnected = true;
6198 STATUS &= ~STATUS_LU;
6199 Phy::setLinkStatus(&pState->phy, false);
6200 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
6201 /* Restore the link back in 5 seconds (by default). */
6202 e1kBringLinkUpDelayed(pState);
6203 }
6204 else
6205 {
6206 E1kLog(("%s Link is down\n", INSTANCE(pState)));
6207 pState->fCableConnected = false;
6208 STATUS &= ~STATUS_LU;
6209 Phy::setLinkStatus(&pState->phy, false);
6210 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
6211 }
6212 if (pState->pDrvR3)
6213 pState->pDrvR3->pfnNotifyLinkChanged(pState->pDrvR3, enmState);
6214 }
6215 return VINF_SUCCESS;
6216}
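/*
 * Illustrative sketch, not part of the device code: this is roughly how a
 * frontend would toggle the virtual cable through the PDMINETWORKCONFIG
 * interface implemented above. pBase and the surrounding control flow are
 * assumptions made up for the example.
 *
 *   PPDMINETWORKCONFIG pCfg = PDMIBASE_QUERY_INTERFACE(pBase, PDMINETWORKCONFIG);
 *   if (pCfg)
 *   {
 *       pCfg->pfnSetLinkState(pCfg, PDMNETWORKLINKSTATE_DOWN); // unplug the cable
 *       // ... user plugs it back in ...
 *       pCfg->pfnSetLinkState(pCfg, PDMNETWORKLINKSTATE_UP);   // e1kSetLinkState above delays the LSC
 *   }
 */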
6217
6218/**
6219 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
6220 */
6221static DECLCALLBACK(void *) e1kQueryInterface(struct PDMIBASE *pInterface, const char *pszIID)
6222{
6223 E1KSTATE *pThis = RT_FROM_MEMBER(pInterface, E1KSTATE, IBase);
6224 Assert(&pThis->IBase == pInterface);
6225
6226 PDMIBASE_RETURN_INTERFACE(pszIID, PDMIBASE, &pThis->IBase);
6227 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKDOWN, &pThis->INetworkDown);
6228 PDMIBASE_RETURN_INTERFACE(pszIID, PDMINETWORKCONFIG, &pThis->INetworkConfig);
6229 PDMIBASE_RETURN_INTERFACE(pszIID, PDMILEDPORTS, &pThis->ILeds);
6230 return NULL;
6231}
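/*
 * A minimal usage sketch (an assumption for illustration, not actual
 * VirtualBox code): an attached driver can walk the interface list exposed
 * above with the same PDMIBASE_QUERY_INTERFACE helper used elsewhere in this
 * file.
 *
 *   PPDMILEDPORTS pLedPorts = PDMIBASE_QUERY_INTERFACE(&pThis->IBase, PDMILEDPORTS);
 *   PPDMLED       pLed      = NULL;
 *   if (pLedPorts)
 *       pLedPorts->pfnQueryStatusLed(pLedPorts, 0, &pLed);  // LUN 0, see e1kQueryStatusLed
 */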
6232
6233/**
6234 * Saves the configuration.
6235 *
6236 * @param pState The E1K state.
6237 * @param pSSM The handle to the saved state.
6238 */
6239static void e1kSaveConfig(E1KSTATE *pState, PSSMHANDLE pSSM)
6240{
6241 SSMR3PutMem(pSSM, &pState->macConfigured, sizeof(pState->macConfigured));
6242 SSMR3PutU32(pSSM, pState->eChip);
6243}
6244
6245/**
6246 * Live save - save basic configuration.
6247 *
6248 * @returns VBox status code.
6249 * @param pDevIns The device instance.
6250 * @param pSSM The handle to the saved state.
6251 * @param uPass The data pass.
6252 */
6253static DECLCALLBACK(int) e1kLiveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uPass)
6254{
6255 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6256 e1kSaveConfig(pState, pSSM);
6257 return VINF_SSM_DONT_CALL_AGAIN;
6258}
6259
6260/**
6261 * Prepares for state saving.
6262 *
6263 * @returns VBox status code.
6264 * @param pDevIns The device instance.
6265 * @param pSSM The handle to the saved state.
6266 */
6267static DECLCALLBACK(int) e1kSavePrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6268{
6269 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6270
6271 int rc = e1kCsEnter(pState, VERR_SEM_BUSY);
6272 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6273 return rc;
6274 e1kCsLeave(pState);
6275 return VINF_SUCCESS;
6276#if 0
6277 /* 1) Prevent all threads from modifying the state and memory */
6278 //pState->fLocked = true;
6279 /* 2) Cancel all timers */
6280#ifdef E1K_USE_TX_TIMERS
6281 e1kCancelTimer(pState, pState->CTX_SUFF(pTIDTimer));
6282#ifndef E1K_NO_TAD
6283 e1kCancelTimer(pState, pState->CTX_SUFF(pTADTimer));
6284#endif /* E1K_NO_TAD */
6285#endif /* E1K_USE_TX_TIMERS */
6286#ifdef E1K_USE_RX_TIMERS
6287 e1kCancelTimer(pState, pState->CTX_SUFF(pRIDTimer));
6288 e1kCancelTimer(pState, pState->CTX_SUFF(pRADTimer));
6289#endif /* E1K_USE_RX_TIMERS */
6290 e1kCancelTimer(pState, pState->CTX_SUFF(pIntTimer));
6291 /* 3) Did I forget anything? */
6292 E1kLog(("%s Locked\n", INSTANCE(pState)));
6293 return VINF_SUCCESS;
6294#endif
6295}
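/*
 * Note on the enter/leave pair above: taking the critical section and
 * immediately releasing it simply serializes the save preparation against any
 * thread currently operating on the device, so the state is quiescent by the
 * time e1kSaveExec runs. e1kLoadPrep below relies on the same trick.
 */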
6296
6297
6298/**
6299 * Saves the state of the device.
6300 *
6301 * @returns VBox status code.
6302 * @param pDevIns The device instance.
6303 * @param pSSM The handle to the saved state.
6304 */
6305static DECLCALLBACK(int) e1kSaveExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6306{
6307 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6308
6309 e1kSaveConfig(pState, pSSM);
6310 pState->eeprom.save(pSSM);
6311 e1kDumpState(pState);
6312 SSMR3PutMem(pSSM, pState->auRegs, sizeof(pState->auRegs));
6313 SSMR3PutBool(pSSM, pState->fIntRaised);
6314 Phy::saveState(pSSM, &pState->phy);
6315 SSMR3PutU32(pSSM, pState->uSelectedReg);
6316 SSMR3PutMem(pSSM, pState->auMTA, sizeof(pState->auMTA));
6317 SSMR3PutMem(pSSM, &pState->aRecAddr, sizeof(pState->aRecAddr));
6318 SSMR3PutMem(pSSM, pState->auVFTA, sizeof(pState->auVFTA));
6319 SSMR3PutU64(pSSM, pState->u64AckedAt);
6320 SSMR3PutU16(pSSM, pState->u16RxBSize);
6321 //SSMR3PutBool(pSSM, pState->fDelayInts);
6322 //SSMR3PutBool(pSSM, pState->fIntMaskUsed);
6323 SSMR3PutU16(pSSM, pState->u16TxPktLen);
6324/** @todo State wrt the TSE buffer is incomplete, so there is little point in
6325 * actually saving this. */
6326 SSMR3PutMem(pSSM, pState->aTxPacketFallback, pState->u16TxPktLen);
6327 SSMR3PutBool(pSSM, pState->fIPcsum);
6328 SSMR3PutBool(pSSM, pState->fTCPcsum);
6329 SSMR3PutMem(pSSM, &pState->contextTSE, sizeof(pState->contextTSE));
6330 SSMR3PutMem(pSSM, &pState->contextNormal, sizeof(pState->contextNormal));
6331 SSMR3PutBool(pSSM, pState->fVTag);
6332 SSMR3PutU16(pSSM, pState->u16VTagTCI);
6333#ifdef E1K_WITH_TXD_CACHE
6334 SSMR3PutU8(pSSM, pState->nTxDFetched);
6335 SSMR3PutMem(pSSM, pState->aTxDescriptors,
6336 pState->nTxDFetched * sizeof(pState->aTxDescriptors[0]));
6337#endif /* E1K_WITH_TXD_CACHE */
6338/**@todo GSO requires some more state here. */
6339 E1kLog(("%s State has been saved\n", INSTANCE(pState)));
6340 return VINF_SUCCESS;
6341}
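/*
 * For reference, the saved-state unit written above consists of, in order:
 * the config block (MAC + chip type), the EEPROM contents, all 32-bit device
 * registers, the interrupt-raised flag, the PHY state, the selected register,
 * the MTA/RA/VFTA filter tables, the interrupt acknowledge timestamp and RX
 * buffer size, the partial TX packet (length + data), the checksum offload
 * flags, both TX contexts, the VLAN tag, and (with E1K_WITH_TXD_CACHE) the
 * fetched TX descriptors. e1kLoadExec reads it back in the same order.
 */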
6342
6343#if 0
6344/**
6345 * Cleanup after saving.
6346 *
6347 * @returns VBox status code.
6348 * @param pDevIns The device instance.
6349 * @param pSSM The handle to the saved state.
6350 */
6351static DECLCALLBACK(int) e1kSaveDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6352{
6353 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6354
6355 /* If VM is being powered off unlocking will result in assertions in PGM */
6356 if (PDMDevHlpGetVM(pDevIns)->enmVMState == VMSTATE_RUNNING)
6357 pState->fLocked = false;
6358 else
6359 E1kLog(("%s VM is not running -- remain locked\n", INSTANCE(pState)));
6360 E1kLog(("%s Unlocked\n", INSTANCE(pState)));
6361 return VINF_SUCCESS;
6362}
6363#endif
6364
6365/**
6366 * Prepares for state loading (synchronizes with the device by briefly
6366 * taking and releasing its critical section).
6367 *
6368 * @returns VBox status code.
6369 * @param pDevIns The device instance.
6370 * @param pSSM The handle to the saved state.
6371 */
6372static DECLCALLBACK(int) e1kLoadPrep(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6373{
6374 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6375
6376 int rc = e1kCsEnter(pState, VERR_SEM_BUSY);
6377 if (RT_UNLIKELY(rc != VINF_SUCCESS))
6378 return rc;
6379 e1kCsLeave(pState);
6380 return VINF_SUCCESS;
6381}
6382
6383/**
6384 * Restores the previously saved state of the device.
6385 *
6386 * @returns VBox status code.
6387 * @param pDevIns The device instance.
6388 * @param pSSM The handle to the saved state.
6389 * @param uVersion The data unit version number.
6390 * @param uPass The data pass.
6391 */
6392static DECLCALLBACK(int) e1kLoadExec(PPDMDEVINS pDevIns, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
6393{
6394 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6395 int rc;
6396
6397 if ( uVersion != E1K_SAVEDSTATE_VERSION
6398#ifdef E1K_WITH_TXD_CACHE
6399 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG
6400#endif /* E1K_WITH_TXD_CACHE */
6401 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_41
6402 && uVersion != E1K_SAVEDSTATE_VERSION_VBOX_30)
6403 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
6404
6405 if ( uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30
6406 || uPass != SSM_PASS_FINAL)
6407 {
6408 /* config checks */
6409 RTMAC macConfigured;
6410 rc = SSMR3GetMem(pSSM, &macConfigured, sizeof(macConfigured));
6411 AssertRCReturn(rc, rc);
6412 if ( memcmp(&macConfigured, &pState->macConfigured, sizeof(macConfigured))
6413 && (uPass == 0 || !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)) )
6414 LogRel(("%s: The mac address differs: config=%RTmac saved=%RTmac\n", INSTANCE(pState), &pState->macConfigured, &macConfigured));
6415
6416 E1KCHIP eChip;
6417 rc = SSMR3GetU32(pSSM, &eChip);
6418 AssertRCReturn(rc, rc);
6419 if (eChip != pState->eChip)
6420 return SSMR3SetCfgError(pSSM, RT_SRC_POS, N_("The chip type differs: config=%u saved=%u"), pState->eChip, eChip);
6421 }
6422
6423 if (uPass == SSM_PASS_FINAL)
6424 {
6425 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_30)
6426 {
6427 rc = pState->eeprom.load(pSSM);
6428 AssertRCReturn(rc, rc);
6429 }
6430 /* the state */
6431 SSMR3GetMem(pSSM, &pState->auRegs, sizeof(pState->auRegs));
6432 SSMR3GetBool(pSSM, &pState->fIntRaised);
6433 /** @todo: PHY could be made a separate device with its own versioning */
6434 Phy::loadState(pSSM, &pState->phy);
6435 SSMR3GetU32(pSSM, &pState->uSelectedReg);
6436 SSMR3GetMem(pSSM, &pState->auMTA, sizeof(pState->auMTA));
6437 SSMR3GetMem(pSSM, &pState->aRecAddr, sizeof(pState->aRecAddr));
6438 SSMR3GetMem(pSSM, &pState->auVFTA, sizeof(pState->auVFTA));
6439 SSMR3GetU64(pSSM, &pState->u64AckedAt);
6440 SSMR3GetU16(pSSM, &pState->u16RxBSize);
6441 //SSMR3GetBool(pSSM, pState->fDelayInts);
6442 //SSMR3GetBool(pSSM, pState->fIntMaskUsed);
6443 SSMR3GetU16(pSSM, &pState->u16TxPktLen);
6444 SSMR3GetMem(pSSM, &pState->aTxPacketFallback[0], pState->u16TxPktLen);
6445 SSMR3GetBool(pSSM, &pState->fIPcsum);
6446 SSMR3GetBool(pSSM, &pState->fTCPcsum);
6447 SSMR3GetMem(pSSM, &pState->contextTSE, sizeof(pState->contextTSE));
6448 rc = SSMR3GetMem(pSSM, &pState->contextNormal, sizeof(pState->contextNormal));
6449 AssertRCReturn(rc, rc);
6450 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_41)
6451 {
6452 SSMR3GetBool(pSSM, &pState->fVTag);
6453 rc = SSMR3GetU16(pSSM, &pState->u16VTagTCI);
6454 AssertRCReturn(rc, rc);
6455 }
6456 else
6457 {
6458 pState->fVTag = false;
6459 pState->u16VTagTCI = 0;
6460 }
6461#ifdef E1K_WITH_TXD_CACHE
6462 if (uVersion > E1K_SAVEDSTATE_VERSION_VBOX_42_VTAG)
6463 {
6464 rc = SSMR3GetU8(pSSM, &pState->nTxDFetched);
6465 AssertRCReturn(rc, rc);
6466 SSMR3GetMem(pSSM, pState->aTxDescriptors,
6467 pState->nTxDFetched * sizeof(pState->aTxDescriptors[0]));
6468 }
6469 else
6470 pState->nTxDFetched = 0;
6471 /*
6472 * @todo: Perhaps we should not store TXD cache as the entries can be
6473 * simply fetched again from guest's memory. Or can't they?
6474 */
6475#endif /* E1K_WITH_TXD_CACHE */
6476#ifdef E1K_WITH_RXD_CACHE
6477 /*
6478 * There is no point in storing the RX descriptor cache in the saved
6479 * state, we just need to make sure it is empty.
6480 */
6481 pState->iRxDCurrent = pState->nRxDFetched = 0;
6482#endif /* E1K_WITH_RXD_CACHE */
6483 /* derived state */
6484 e1kSetupGsoCtx(&pState->GsoCtx, &pState->contextTSE);
6485
6486 E1kLog(("%s State has been restored\n", INSTANCE(pState)));
6487 e1kDumpState(pState);
6488 }
6489 return VINF_SUCCESS;
6490}
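/*
 * Version handling summary for the loader above: fields introduced after a
 * given saved-state format (the VLAN tag after the 4.1 format, the TX
 * descriptor cache after the 4.2 VTAG format) are only read when the unit
 * version says they are present; otherwise the defaults assigned above are
 * used, and the RX descriptor cache is always reset rather than restored.
 */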
6491
6492/**
6493 * Link status adjustments after loading.
6494 *
6495 * @returns VBox status code.
6496 * @param pDevIns The device instance.
6497 * @param pSSM The handle to the saved state.
6498 */
6499static DECLCALLBACK(int) e1kLoadDone(PPDMDEVINS pDevIns, PSSMHANDLE pSSM)
6500{
6501 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6502
6503 /* Update promiscuous mode */
6504 if (pState->pDrvR3)
6505 pState->pDrvR3->pfnSetPromiscuousMode(pState->pDrvR3,
6506 !!(RCTL & (RCTL_UPE | RCTL_MPE)));
6507
6508 /*
6509 * Force the link down here, since PDMNETWORKLINKSTATE_DOWN_RESUME is never
6510 * passed to us. We go through all this stuff if the link was up and we
6511 * weren't teleported.
6512 */
6513 if ( (STATUS & STATUS_LU)
6514 && !PDMDevHlpVMTeleportedAndNotFullyResumedYet(pDevIns)
6515 && pState->cMsLinkUpDelay)
6516 {
6517 E1kLog(("%s Link is down temporarily\n", INSTANCE(pState)));
6518 STATUS &= ~STATUS_LU;
6519 Phy::setLinkStatus(&pState->phy, false);
6520 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
6521 /* Restore the link back in five seconds (default). */
6522 e1kBringLinkUpDelayed(pState);
6523 }
6524 return VINF_SUCCESS;
6525}
6526
6527
6528/* -=-=-=-=- PDMDEVREG -=-=-=-=- */
6529
6530/**
6531 * Detach notification.
6532 *
6533 * One port on the network card has been disconnected from the network.
6534 *
6535 * @param pDevIns The device instance.
6536 * @param iLUN The logical unit which is being detached.
6537 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
6538 */
6539static DECLCALLBACK(void) e1kDetach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
6540{
6541 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6542 Log(("%s e1kDetach:\n", INSTANCE(pState)));
6543
6544 AssertLogRelReturnVoid(iLUN == 0);
6545
6546 PDMCritSectEnter(&pState->cs, VERR_SEM_BUSY);
6547
6548 /** @todo: r=pritesh still need to check if I missed
6549 * cleaning something up in this function
6550 */
6551
6552 /*
6553 * Zero some important members.
6554 */
6555 pState->pDrvBase = NULL;
6556 pState->pDrvR3 = NULL;
6557 pState->pDrvR0 = NIL_RTR0PTR;
6558 pState->pDrvRC = NIL_RTRCPTR;
6559
6560 PDMCritSectLeave(&pState->cs);
6561}
6562
6563/**
6564 * Attaches a network driver (the network attachment).
6565 *
6566 * One port on the network card has been connected to a network.
6567 *
6568 * @returns VBox status code.
6569 * @param pDevIns The device instance.
6570 * @param iLUN The logical unit which is being attached.
6571 * @param fFlags Flags, combination of the PDMDEVATT_FLAGS_* \#defines.
6572 *
6573 * @remarks This code path is not used during construction.
6574 */
6575static DECLCALLBACK(int) e1kAttach(PPDMDEVINS pDevIns, unsigned iLUN, uint32_t fFlags)
6576{
6577 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6578 LogFlow(("%s e1kAttach:\n", INSTANCE(pState)));
6579
6580 AssertLogRelReturn(iLUN == 0, VERR_PDM_NO_SUCH_LUN);
6581
6582 PDMCritSectEnter(&pState->cs, VERR_SEM_BUSY);
6583
6584 /*
6585 * Attach the driver.
6586 */
6587 int rc = PDMDevHlpDriverAttach(pDevIns, 0, &pState->IBase, &pState->pDrvBase, "Network Port");
6588 if (RT_SUCCESS(rc))
6589 {
6590 if (rc == VINF_NAT_DNS)
6591 {
6592#ifdef RT_OS_LINUX
6593 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
6594 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Please check your /etc/resolv.conf for <tt>nameserver</tt> entries. Either add one manually (<i>man resolv.conf</i>) or ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
6595#else
6596 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
6597 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
6598#endif
6599 }
6600 pState->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMINETWORKUP);
6601 AssertMsgStmt(pState->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
6602 rc = VERR_PDM_MISSING_INTERFACE_BELOW);
6603 if (RT_SUCCESS(rc))
6604 {
6605 PPDMIBASER0 pBaseR0 = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASER0);
6606 pState->pDrvR0 = pBaseR0 ? pBaseR0->pfnQueryInterface(pBaseR0, PDMINETWORKUP_IID) : NIL_RTR0PTR;
6607
6608 PPDMIBASERC pBaseRC = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASERC);
6609 pState->pDrvRC = pBaseRC ? pBaseRC->pfnQueryInterface(pBaseRC, PDMINETWORKUP_IID) : NIL_RTR0PTR;
6610 }
6611 }
6612 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
6613 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
6614 {
6615 /* This should never happen because this function is not called
6616 * if there is no driver to attach! */
6617 Log(("%s No attached driver!\n", INSTANCE(pState)));
6618 }
6619
6620 /*
6621 * Temporarily set the link down if it was up so that the guest
6622 * will know that we have changed the configuration of the
6623 * network card.
6624 */
6625 if ((STATUS & STATUS_LU) && RT_SUCCESS(rc))
6626 {
6627 STATUS &= ~STATUS_LU;
6628 Phy::setLinkStatus(&pState->phy, false);
6629 e1kRaiseInterrupt(pState, VERR_SEM_BUSY, ICR_LSC);
6630 /* Restore the link back in 5 seconds (default). */
6631 e1kBringLinkUpDelayed(pState);
6632 }
6633
6634 PDMCritSectLeave(&pState->cs);
6635 return rc;
6636
6637}
6638
6639/**
6640 * @copydoc FNPDMDEVPOWEROFF
6641 */
6642static DECLCALLBACK(void) e1kPowerOff(PPDMDEVINS pDevIns)
6643{
6644 /* Poke thread waiting for buffer space. */
6645 e1kWakeupReceive(pDevIns);
6646}
6647
6648/**
6649 * @copydoc FNPDMDEVRESET
6650 */
6651static DECLCALLBACK(void) e1kReset(PPDMDEVINS pDevIns)
6652{
6653 E1KSTATE *pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6654 e1kCancelTimer(pState, pState->CTX_SUFF(pIntTimer));
6655 e1kCancelTimer(pState, pState->CTX_SUFF(pLUTimer));
6656 e1kXmitFreeBuf(pState);
6657 pState->u16TxPktLen = 0;
6658 pState->fIPcsum = false;
6659 pState->fTCPcsum = false;
6660 pState->fIntMaskUsed = false;
6661 pState->fDelayInts = false;
6662 pState->fLocked = false;
6663 pState->u64AckedAt = 0;
6664 e1kHardReset(pState);
6665}
6666
6667/**
6668 * @copydoc FNPDMDEVSUSPEND
6669 */
6670static DECLCALLBACK(void) e1kSuspend(PPDMDEVINS pDevIns)
6671{
6672 /* Poke thread waiting for buffer space. */
6673 e1kWakeupReceive(pDevIns);
6674}
6675
6676/**
6677 * Device relocation callback.
6678 *
6679 * When this callback is called, the device instance data and, if the
6680 * device has a GC component, that component are being relocated, or the
6681 * selectors have been changed. The device must use the chance to perform
6682 * the necessary pointer relocations and data updates.
6683 *
6684 * Before the GC code is executed the first time, this function will be
6685 * called with a 0 delta so GC pointer calculations can be done in one place.
6686 *
6687 * @param pDevIns Pointer to the device instance.
6688 * @param offDelta The relocation delta relative to the old location.
6689 *
6690 * @remark A relocation CANNOT fail.
6691 */
6692static DECLCALLBACK(void) e1kRelocate(PPDMDEVINS pDevIns, RTGCINTPTR offDelta)
6693{
6694 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6695 pState->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
6696 pState->pTxQueueRC = PDMQueueRCPtr(pState->pTxQueueR3);
6697 pState->pCanRxQueueRC = PDMQueueRCPtr(pState->pCanRxQueueR3);
6698#ifdef E1K_USE_RX_TIMERS
6699 pState->pRIDTimerRC = TMTimerRCPtr(pState->pRIDTimerR3);
6700 pState->pRADTimerRC = TMTimerRCPtr(pState->pRADTimerR3);
6701#endif /* E1K_USE_RX_TIMERS */
6702#ifdef E1K_USE_TX_TIMERS
6703 pState->pTIDTimerRC = TMTimerRCPtr(pState->pTIDTimerR3);
6704# ifndef E1K_NO_TAD
6705 pState->pTADTimerRC = TMTimerRCPtr(pState->pTADTimerR3);
6706# endif /* E1K_NO_TAD */
6707#endif /* E1K_USE_TX_TIMERS */
6708 pState->pIntTimerRC = TMTimerRCPtr(pState->pIntTimerR3);
6709 pState->pLUTimerRC = TMTimerRCPtr(pState->pLUTimerR3);
6710}
6711
6712/**
6713 * Destruct a device instance.
6714 *
6715 * We need to free non-VM resources only.
6716 *
6717 * @returns VBox status.
6718 * @param pDevIns The device instance data.
6719 * @thread EMT
6720 */
6721static DECLCALLBACK(int) e1kDestruct(PPDMDEVINS pDevIns)
6722{
6723 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6724 PDMDEV_CHECK_VERSIONS_RETURN_QUIET(pDevIns);
6725
6726 e1kDumpState(pState);
6727 E1kLog(("%s Destroying instance\n", INSTANCE(pState)));
6728 if (PDMCritSectIsInitialized(&pState->cs))
6729 {
6730 if (pState->hEventMoreRxDescAvail != NIL_RTSEMEVENT)
6731 {
6732 RTSemEventSignal(pState->hEventMoreRxDescAvail);
6733 RTSemEventDestroy(pState->hEventMoreRxDescAvail);
6734 pState->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
6735 }
6736#ifdef E1K_WITH_TX_CS
6737 PDMR3CritSectDelete(&pState->csTx);
6738#endif /* E1K_WITH_TX_CS */
6739 PDMR3CritSectDelete(&pState->csRx);
6740 PDMR3CritSectDelete(&pState->cs);
6741 }
6742 return VINF_SUCCESS;
6743}
6744
6745/**
6746 * Dump receive descriptor to debugger info buffer.
6747 *
6748 * @param pState The device state structure.
6749 * @param pHlp The output helpers.
6750 * @param addr Physical address of the descriptor in guest context.
6751 * @param pDesc Pointer to the descriptor.
6752 */
6753static void e1kRDescInfo(E1KSTATE* pState, PCDBGFINFOHLP pHlp, RTGCPHYS addr, E1KRXDESC* pDesc)
6754{
6755 pHlp->pfnPrintf(pHlp, "%RGp: Address=%16LX Length=%04X Csum=%04X\n",
6756 addr, pDesc->u64BufAddr, pDesc->u16Length, pDesc->u16Checksum);
6757 pHlp->pfnPrintf(pHlp, " STA: %s %s %s %s %s %s %s ERR: %s %s %s %s SPECIAL: %s VLAN=%03x PRI=%x\n",
6758 pDesc->status.fPIF ? "PIF" : "pif",
6759 pDesc->status.fIPCS ? "IPCS" : "ipcs",
6760 pDesc->status.fTCPCS ? "TCPCS" : "tcpcs",
6761 pDesc->status.fVP ? "VP" : "vp",
6762 pDesc->status.fIXSM ? "IXSM" : "ixsm",
6763 pDesc->status.fEOP ? "EOP" : "eop",
6764 pDesc->status.fDD ? "DD" : "dd",
6765 pDesc->status.fRXE ? "RXE" : "rxe",
6766 pDesc->status.fIPE ? "IPE" : "ipe",
6767 pDesc->status.fTCPE ? "TCPE" : "tcpe",
6768 pDesc->status.fCE ? "CE" : "ce",
6769 E1K_SPEC_CFI(pDesc->status.u16Special) ? "CFI" :"cfi",
6770 E1K_SPEC_VLAN(pDesc->status.u16Special),
6771 E1K_SPEC_PRI(pDesc->status.u16Special));
6772}
6773
6774/**
6775 * Dump transmit descriptor to debugger info buffer.
6776 *
6777 * @param pState The device state structure.
6778 * @param pHlp The output helpers.
6779 * @param addr Physical address of the descriptor in guest context.
6780 * @param pDesc Pointer to descriptor union.
6781 */
6782static void e1kTDescInfo(E1KSTATE* pState, PCDBGFINFOHLP pHlp, RTGCPHYS addr, E1KTXDESC* pDesc)
6783{
6784 switch (e1kGetDescType(pDesc))
6785 {
6786 case E1K_DTYP_CONTEXT:
6787 pHlp->pfnPrintf(pHlp, "%RGp: Type=Context\n", addr);
6788 pHlp->pfnPrintf(pHlp, " IPCSS=%02X IPCSO=%02X IPCSE=%04X TUCSS=%02X TUCSO=%02X TUCSE=%04X\n",
6789 pDesc->context.ip.u8CSS, pDesc->context.ip.u8CSO, pDesc->context.ip.u16CSE,
6790 pDesc->context.tu.u8CSS, pDesc->context.tu.u8CSO, pDesc->context.tu.u16CSE);
6791 pHlp->pfnPrintf(pHlp, " TUCMD:%s%s%s %s %s PAYLEN=%04x HDRLEN=%04x MSS=%04x STA: %s\n",
6792 pDesc->context.dw2.fIDE ? " IDE":"",
6793 pDesc->context.dw2.fRS ? " RS" :"",
6794 pDesc->context.dw2.fTSE ? " TSE":"",
6795 pDesc->context.dw2.fIP ? "IPv4":"IPv6",
6796 pDesc->context.dw2.fTCP ? "TCP":"UDP",
6797 pDesc->context.dw2.u20PAYLEN,
6798 pDesc->context.dw3.u8HDRLEN,
6799 pDesc->context.dw3.u16MSS,
6800 pDesc->context.dw3.fDD?"DD":"");
6801 break;
6802 case E1K_DTYP_DATA:
6803 pHlp->pfnPrintf(pHlp, "%RGp: Type=Data Address=%16LX DTALEN=%05X\n",
6804 addr,
6805 pDesc->data.u64BufAddr,
6806 pDesc->data.cmd.u20DTALEN);
6807 pHlp->pfnPrintf(pHlp, " DCMD:%s%s%s%s%s%s%s STA:%s%s%s POPTS:%s%s SPECIAL:%s VLAN=%03x PRI=%x\n",
6808 pDesc->data.cmd.fIDE ? " IDE" :"",
6809 pDesc->data.cmd.fVLE ? " VLE" :"",
6810 pDesc->data.cmd.fRPS ? " RPS" :"",
6811 pDesc->data.cmd.fRS ? " RS" :"",
6812 pDesc->data.cmd.fTSE ? " TSE" :"",
6813 pDesc->data.cmd.fIFCS? " IFCS":"",
6814 pDesc->data.cmd.fEOP ? " EOP" :"",
6815 pDesc->data.dw3.fDD ? " DD" :"",
6816 pDesc->data.dw3.fEC ? " EC" :"",
6817 pDesc->data.dw3.fLC ? " LC" :"",
6818 pDesc->data.dw3.fTXSM? " TXSM":"",
6819 pDesc->data.dw3.fIXSM? " IXSM":"",
6820 E1K_SPEC_CFI(pDesc->data.dw3.u16Special) ? "CFI" :"cfi",
6821 E1K_SPEC_VLAN(pDesc->data.dw3.u16Special),
6822 E1K_SPEC_PRI(pDesc->data.dw3.u16Special));
6823 break;
6824 case E1K_DTYP_LEGACY:
6825 pHlp->pfnPrintf(pHlp, "%RGp: Type=Legacy Address=%16LX DTALEN=%05X\n",
6826 addr,
6827 pDesc->data.u64BufAddr,
6828 pDesc->legacy.cmd.u16Length);
6829 pHlp->pfnPrintf(pHlp, " CMD:%s%s%s%s%s%s%s STA:%s%s%s CSO=%02x CSS=%02x SPECIAL:%s VLAN=%03x PRI=%x\n",
6830 pDesc->legacy.cmd.fIDE ? " IDE" :"",
6831 pDesc->legacy.cmd.fVLE ? " VLE" :"",
6832 pDesc->legacy.cmd.fRPS ? " RPS" :"",
6833 pDesc->legacy.cmd.fRS ? " RS" :"",
6834 pDesc->legacy.cmd.fIC ? " IC" :"",
6835 pDesc->legacy.cmd.fIFCS? " IFCS":"",
6836 pDesc->legacy.cmd.fEOP ? " EOP" :"",
6837 pDesc->legacy.dw3.fDD ? " DD" :"",
6838 pDesc->legacy.dw3.fEC ? " EC" :"",
6839 pDesc->legacy.dw3.fLC ? " LC" :"",
6840 pDesc->legacy.cmd.u8CSO,
6841 pDesc->legacy.dw3.u8CSS,
6842 E1K_SPEC_CFI(pDesc->legacy.dw3.u16Special) ? "CFI" :"cfi",
6843 E1K_SPEC_VLAN(pDesc->legacy.dw3.u16Special),
6844 E1K_SPEC_PRI(pDesc->legacy.dw3.u16Special));
6845 break;
6846 default:
6847 pHlp->pfnPrintf(pHlp, "%RGp: Invalid Transmit Descriptor\n", addr);
6848 break;
6849 }
6850}
6851
6852/**
6853 * Status info callback.
6854 *
6855 * @param pDevIns The device instance.
6856 * @param pHlp The output helpers.
6857 * @param pszArgs The arguments.
6858 */
6859static DECLCALLBACK(void) e1kInfo(PPDMDEVINS pDevIns, PCDBGFINFOHLP pHlp, const char *pszArgs)
6860{
6861 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
6862 unsigned i;
6863 // bool fRcvRing = false;
6864 // bool fXmtRing = false;
6865
6866 /*
6867 * Parse args.
6868 if (pszArgs)
6869 {
6870 fRcvRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "rcv");
6871 fXmtRing = strstr(pszArgs, "verbose") || strstr(pszArgs, "xmt");
6872 }
6873 */
6874
6875 /*
6876 * Show info.
6877 */
6878 pHlp->pfnPrintf(pHlp, "E1000 #%d: port=%RTiop mmio=%RGp mac-cfg=%RTmac %s%s%s\n",
6879 pDevIns->iInstance, pState->addrIOPort, pState->addrMMReg,
6880 &pState->macConfigured, g_Chips[pState->eChip].pcszName,
6881 pState->fGCEnabled ? " GC" : "", pState->fR0Enabled ? " R0" : "");
6882
6883 e1kCsEnter(pState, VERR_INTERNAL_ERROR); /* Not sure why but PCNet does it */
6884
6885 for (i = 0; i < E1K_NUM_OF_32BIT_REGS; ++i)
6886 pHlp->pfnPrintf(pHlp, "%8.8s = %08x\n", s_e1kRegMap[i].abbrev, pState->auRegs[i]);
6887
6888 for (i = 0; i < RT_ELEMENTS(pState->aRecAddr.array); i++)
6889 {
6890 E1KRAELEM* ra = pState->aRecAddr.array + i;
6891 if (ra->ctl & RA_CTL_AV)
6892 {
6893 const char *pcszTmp;
6894 switch (ra->ctl & RA_CTL_AS)
6895 {
6896 case 0: pcszTmp = "DST"; break;
6897 case 1: pcszTmp = "SRC"; break;
6898 default: pcszTmp = "reserved";
6899 }
6900 pHlp->pfnPrintf(pHlp, "RA%02d: %s %RTmac\n", i, pcszTmp, ra->addr);
6901 }
6902 }
6903 unsigned cDescs = RDLEN / sizeof(E1KRXDESC);
6904 pHlp->pfnPrintf(pHlp, "\n-- Receive Descriptors (%d total) --\n", cDescs);
6905 for (i = 0; i < cDescs; ++i)
6906 {
6907 E1KRXDESC desc;
6908 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(RDBAH, RDBAL, i),
6909 &desc, sizeof(desc));
6910 e1kRDescInfo(pState, pHlp, e1kDescAddr(RDBAH, RDBAL, i), &desc);
6911 }
6912 cDescs = TDLEN / sizeof(E1KTXDESC);
6913 pHlp->pfnPrintf(pHlp, "\n-- Transmit Descriptors (%d total) --\n", cDescs);
6914 for (i = 0; i < cDescs; ++i)
6915 {
6916 E1KTXDESC desc;
6917 PDMDevHlpPhysRead(pDevIns, e1kDescAddr(TDBAH, TDBAL, i),
6918 &desc, sizeof(desc));
6919 e1kTDescInfo(pState, pHlp, e1kDescAddr(TDBAH, TDBAL, i), &desc);
6920 }
6921
6922
6923#ifdef E1K_INT_STATS
6924 pHlp->pfnPrintf(pHlp, "Interrupt attempts: %d\n", pState->uStatIntTry);
6925 pHlp->pfnPrintf(pHlp, "Interrupts raised : %d\n", pState->uStatInt);
6926 pHlp->pfnPrintf(pHlp, "Interrupts lowered: %d\n", pState->uStatIntLower);
6927 pHlp->pfnPrintf(pHlp, "Interrupts delayed: %d\n", pState->uStatIntDly);
6928 pHlp->pfnPrintf(pHlp, "Disabled delayed: %d\n", pState->uStatDisDly);
6929 pHlp->pfnPrintf(pHlp, "Interrupts skipped: %d\n", pState->uStatIntSkip);
6930 pHlp->pfnPrintf(pHlp, "Masked interrupts : %d\n", pState->uStatIntMasked);
6931 pHlp->pfnPrintf(pHlp, "Early interrupts : %d\n", pState->uStatIntEarly);
6932 pHlp->pfnPrintf(pHlp, "Late interrupts : %d\n", pState->uStatIntLate);
6933 pHlp->pfnPrintf(pHlp, "Lost interrupts : %d\n", pState->iStatIntLost);
6934 pHlp->pfnPrintf(pHlp, "Interrupts by RX : %d\n", pState->uStatIntRx);
6935 pHlp->pfnPrintf(pHlp, "Interrupts by TX : %d\n", pState->uStatIntTx);
6936 pHlp->pfnPrintf(pHlp, "Interrupts by ICS : %d\n", pState->uStatIntICS);
6937 pHlp->pfnPrintf(pHlp, "Interrupts by RDTR: %d\n", pState->uStatIntRDTR);
6938 pHlp->pfnPrintf(pHlp, "Interrupts by RDMT: %d\n", pState->uStatIntRXDMT0);
6939 pHlp->pfnPrintf(pHlp, "Interrupts by TXQE: %d\n", pState->uStatIntTXQE);
6940 pHlp->pfnPrintf(pHlp, "TX int delay asked: %d\n", pState->uStatTxIDE);
6941 pHlp->pfnPrintf(pHlp, "TX no report asked: %d\n", pState->uStatTxNoRS);
6942 pHlp->pfnPrintf(pHlp, "TX abs timer expd : %d\n", pState->uStatTAD);
6943 pHlp->pfnPrintf(pHlp, "TX int timer expd : %d\n", pState->uStatTID);
6944 pHlp->pfnPrintf(pHlp, "RX abs timer expd : %d\n", pState->uStatRAD);
6945 pHlp->pfnPrintf(pHlp, "RX int timer expd : %d\n", pState->uStatRID);
6946 pHlp->pfnPrintf(pHlp, "TX CTX descriptors: %d\n", pState->uStatDescCtx);
6947 pHlp->pfnPrintf(pHlp, "TX DAT descriptors: %d\n", pState->uStatDescDat);
6948 pHlp->pfnPrintf(pHlp, "TX LEG descriptors: %d\n", pState->uStatDescLeg);
6949 pHlp->pfnPrintf(pHlp, "Received frames : %d\n", pState->uStatRxFrm);
6950 pHlp->pfnPrintf(pHlp, "Transmitted frames: %d\n", pState->uStatTxFrm);
6951#endif /* E1K_INT_STATS */
6952
6953 e1kCsLeave(pState);
6954}
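/*
 * The handler above is registered in e1kConstruct under the name "e1k<N>"
 * (see the PDMDevHlpDBGFInfoRegister call there), so it is normally invoked
 * through the debugger facility, e.g. "info e1k0" in the DBGC console; the
 * exact frontend command may vary between VirtualBox versions.
 */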
6955
6956/**
6957 * Sets 8-bit register in PCI configuration space.
6958 * @param refPciDev The PCI device.
6959 * @param uOffset The register offset.
6960 * @param u8Value The value to store in the register.
6961 * @thread EMT
6962 */
6963DECLINLINE(void) e1kPCICfgSetU8(PCIDEVICE& refPciDev, uint32_t uOffset, uint8_t u8Value)
6964{
6965 Assert(uOffset < sizeof(refPciDev.config));
6966 refPciDev.config[uOffset] = u8Value;
6967}
6968
6969/**
6970 * Sets 16-bit register in PCI configuration space.
6971 * @param refPciDev The PCI device.
6972 * @param uOffset The register offset.
6973 * @param u16Value The value to store in the register.
6974 * @thread EMT
6975 */
6976DECLINLINE(void) e1kPCICfgSetU16(PCIDEVICE& refPciDev, uint32_t uOffset, uint16_t u16Value)
6977{
6978 Assert(uOffset+sizeof(u16Value) <= sizeof(refPciDev.config));
6979 *(uint16_t*)&refPciDev.config[uOffset] = u16Value;
6980}
6981
6982/**
6983 * Sets 32-bit register in PCI configuration space.
6984 * @param refPciDev The PCI device.
6985 * @param uOffset The register offset.
6986 * @param u32Value The value to store in the register.
6987 * @thread EMT
6988 */
6989DECLINLINE(void) e1kPCICfgSetU32(PCIDEVICE& refPciDev, uint32_t uOffset, uint32_t u32Value)
6990{
6991 Assert(uOffset+sizeof(u32Value) <= sizeof(refPciDev.config));
6992 *(uint32_t*)&refPciDev.config[uOffset] = u32Value;
6993}
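/*
 * Note: the 16- and 32-bit helpers above write the value straight into the
 * config space byte array in host byte order. PCI configuration space is
 * defined as little endian, so this presumably relies on the host being
 * little endian (x86/amd64), which holds for the platforms VirtualBox targets.
 */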
6994
6995/**
6996 * Set PCI configuration space registers.
6997 *
6998 * @param pci Reference to PCI device structure.
 * @param eChip The emulated chip type, used to select the PCI vendor/device IDs.
6999 * @thread EMT
7000 */
7001static DECLCALLBACK(void) e1kConfigurePCI(PCIDEVICE& pci, E1KCHIP eChip)
7002{
7003 Assert(eChip < RT_ELEMENTS(g_Chips));
7004 /* Configure PCI Device, assume 32-bit mode ******************************/
7005 PCIDevSetVendorId(&pci, g_Chips[eChip].uPCIVendorId);
7006 PCIDevSetDeviceId(&pci, g_Chips[eChip].uPCIDeviceId);
7007 e1kPCICfgSetU16(pci, VBOX_PCI_SUBSYSTEM_VENDOR_ID, g_Chips[eChip].uPCISubsystemVendorId);
7008 e1kPCICfgSetU16(pci, VBOX_PCI_SUBSYSTEM_ID, g_Chips[eChip].uPCISubsystemId);
7009
7010 e1kPCICfgSetU16(pci, VBOX_PCI_COMMAND, 0x0000);
7011 /* DEVSEL Timing (medium device), 66 MHz Capable, New capabilities */
7012 e1kPCICfgSetU16(pci, VBOX_PCI_STATUS,
7013 VBOX_PCI_STATUS_DEVSEL_MEDIUM | VBOX_PCI_STATUS_CAP_LIST | VBOX_PCI_STATUS_66MHZ);
7014 /* Stepping A2 */
7015 e1kPCICfgSetU8( pci, VBOX_PCI_REVISION_ID, 0x02);
7016 /* Ethernet adapter */
7017 e1kPCICfgSetU8( pci, VBOX_PCI_CLASS_PROG, 0x00);
7018 e1kPCICfgSetU16(pci, VBOX_PCI_CLASS_DEVICE, 0x0200);
7019 /* normal single function Ethernet controller */
7020 e1kPCICfgSetU8( pci, VBOX_PCI_HEADER_TYPE, 0x00);
7021 /* Memory Register Base Address */
7022 e1kPCICfgSetU32(pci, VBOX_PCI_BASE_ADDRESS_0, 0x00000000);
7023 /* Memory Flash Base Address */
7024 e1kPCICfgSetU32(pci, VBOX_PCI_BASE_ADDRESS_1, 0x00000000);
7025 /* IO Register Base Address */
7026 e1kPCICfgSetU32(pci, VBOX_PCI_BASE_ADDRESS_2, 0x00000001);
7027 /* Expansion ROM Base Address */
7028 e1kPCICfgSetU32(pci, VBOX_PCI_ROM_ADDRESS, 0x00000000);
7029 /* Capabilities Pointer */
7030 e1kPCICfgSetU8( pci, VBOX_PCI_CAPABILITY_LIST, 0xDC);
7031 /* Interrupt Pin: INTA# */
7032 e1kPCICfgSetU8( pci, VBOX_PCI_INTERRUPT_PIN, 0x01);
7033 /* Max_Lat/Min_Gnt: very high priority and time slice */
7034 e1kPCICfgSetU8( pci, VBOX_PCI_MIN_GNT, 0xFF);
7035 e1kPCICfgSetU8( pci, VBOX_PCI_MAX_LAT, 0x00);
7036
7037 /* PCI Power Management Registers ****************************************/
7038 /* Capability ID: PCI Power Management Registers */
7039 e1kPCICfgSetU8( pci, 0xDC, VBOX_PCI_CAP_ID_PM);
7040 /* Next Item Pointer: PCI-X */
7041 e1kPCICfgSetU8( pci, 0xDC + 1, 0xE4);
7042 /* Power Management Capabilities: PM disabled, DSI */
7043 e1kPCICfgSetU16(pci, 0xDC + 2,
7044 0x0002 | VBOX_PCI_PM_CAP_DSI);
7045 /* Power Management Control / Status Register: PM disabled */
7046 e1kPCICfgSetU16(pci, 0xDC + 4, 0x0000);
7047 /* PMCSR_BSE Bridge Support Extensions: Not supported */
7048 e1kPCICfgSetU8( pci, 0xDC + 6, 0x00);
7049 /* Data Register: PM disabled, always 0 */
7050 e1kPCICfgSetU8( pci, 0xDC + 7, 0x00);
7051
7052 /* PCI-X Configuration Registers *****************************************/
7053 /* Capability ID: PCI-X Configuration Registers */
7054 e1kPCICfgSetU8( pci, 0xE4, VBOX_PCI_CAP_ID_PCIX);
7055#ifdef E1K_WITH_MSI
7056 e1kPCICfgSetU8( pci, 0xE4 + 1, 0x80);
7057#else
7058 /* Next Item Pointer: None (Message Signalled Interrupts are disabled) */
7059 e1kPCICfgSetU8( pci, 0xE4 + 1, 0x00);
7060#endif
7061 /* PCI-X Command: Enable Relaxed Ordering */
7062 e1kPCICfgSetU16(pci, 0xE4 + 2, VBOX_PCI_X_CMD_ERO);
7063 /* PCI-X Status: 32-bit, 66MHz*/
7064 /// @todo: is this value really correct? fff8 doesn't look like actual PCI address
7065 e1kPCICfgSetU32(pci, 0xE4 + 4, 0x0040FFF8);
7066}
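/*
 * Resulting capability chain, as set up above: the capabilities pointer at
 * VBOX_PCI_CAPABILITY_LIST points to 0xDC, where the Power Management
 * capability lives; its next pointer is 0xE4, the PCI-X capability, whose next
 * pointer is either 0x00 (end of list) or 0x80 when E1K_WITH_MSI is defined
 * and the MSI capability is registered separately in e1kConstruct.
 */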
7067
7068/**
7069 * @interface_method_impl{PDMDEVREG,pfnConstruct}
7070 */
7071static DECLCALLBACK(int) e1kConstruct(PPDMDEVINS pDevIns, int iInstance, PCFGMNODE pCfg)
7072{
7073 E1KSTATE* pState = PDMINS_2_DATA(pDevIns, E1KSTATE*);
7074 int rc;
7075 PDMDEV_CHECK_VERSIONS_RETURN(pDevIns);
7076
7077 /* Init handles and log related stuff. */
7078 RTStrPrintf(pState->szInstance, sizeof(pState->szInstance), "E1000#%d", iInstance);
7079 E1kLog(("%s Constructing new instance sizeof(E1KRXDESC)=%d\n", INSTANCE(pState), sizeof(E1KRXDESC)));
7080 pState->hEventMoreRxDescAvail = NIL_RTSEMEVENT;
7081
7082 /*
7083 * Validate configuration.
7084 */
7085 if (!CFGMR3AreValuesValid(pCfg, "MAC\0" "CableConnected\0" "AdapterType\0"
7086 "LineSpeed\0" "GCEnabled\0" "R0Enabled\0"
7087 "EthernetCRC\0" "LinkUpDelay\0"))
7088 return PDMDEV_SET_ERROR(pDevIns, VERR_PDM_DEVINS_UNKNOWN_CFG_VALUES,
7089 N_("Invalid configuration for E1000 device"));
7090
7091 /** @todo: LineSpeed unused! */
7092
7093 pState->fR0Enabled = true;
7094 pState->fGCEnabled = true;
7095 pState->fEthernetCRC = true;
7096
7097 /* Get config params */
7098 rc = CFGMR3QueryBytes(pCfg, "MAC", pState->macConfigured.au8,
7099 sizeof(pState->macConfigured.au8));
7100 if (RT_FAILURE(rc))
7101 return PDMDEV_SET_ERROR(pDevIns, rc,
7102 N_("Configuration error: Failed to get MAC address"));
7103 rc = CFGMR3QueryBool(pCfg, "CableConnected", &pState->fCableConnected);
7104 if (RT_FAILURE(rc))
7105 return PDMDEV_SET_ERROR(pDevIns, rc,
7106 N_("Configuration error: Failed to get the value of 'CableConnected'"));
7107 rc = CFGMR3QueryU32(pCfg, "AdapterType", (uint32_t*)&pState->eChip);
7108 if (RT_FAILURE(rc))
7109 return PDMDEV_SET_ERROR(pDevIns, rc,
7110 N_("Configuration error: Failed to get the value of 'AdapterType'"));
7111 Assert(pState->eChip <= E1K_CHIP_82545EM);
7112 rc = CFGMR3QueryBoolDef(pCfg, "GCEnabled", &pState->fGCEnabled, true);
7113 if (RT_FAILURE(rc))
7114 return PDMDEV_SET_ERROR(pDevIns, rc,
7115 N_("Configuration error: Failed to get the value of 'GCEnabled'"));
7116
7117 rc = CFGMR3QueryBoolDef(pCfg, "R0Enabled", &pState->fR0Enabled, true);
7118 if (RT_FAILURE(rc))
7119 return PDMDEV_SET_ERROR(pDevIns, rc,
7120 N_("Configuration error: Failed to get the value of 'R0Enabled'"));
7121
7122 rc = CFGMR3QueryBoolDef(pCfg, "EthernetCRC", &pState->fEthernetCRC, true);
7123 if (RT_FAILURE(rc))
7124 return PDMDEV_SET_ERROR(pDevIns, rc,
7125 N_("Configuration error: Failed to get the value of 'EthernetCRC'"));
7126 rc = CFGMR3QueryU32Def(pCfg, "LinkUpDelay", (uint32_t*)&pState->cMsLinkUpDelay, 5000); /* ms */
7127 if (RT_FAILURE(rc))
7128 return PDMDEV_SET_ERROR(pDevIns, rc,
7129 N_("Configuration error: Failed to get the value of 'LinkUpDelay'"));
7130 Assert(pState->cMsLinkUpDelay <= 300000); /* at most 5 minutes */
7131 if (pState->cMsLinkUpDelay > 5000)
7132 {
7133 LogRel(("%s WARNING! Link up delay is set to %u seconds!\n",
7134 INSTANCE(pState), pState->cMsLinkUpDelay / 1000));
7135 }
7136 else if (pState->cMsLinkUpDelay == 0)
7137 {
7138 LogRel(("%s WARNING! Link up delay is disabled!\n", INSTANCE(pState)));
7139 }
7140
7141 E1kLog(("%s Chip=%s LinkUpDelay=%ums\n", INSTANCE(pState),
7142 g_Chips[pState->eChip].pcszName, pState->cMsLinkUpDelay));
7143
7144 /* Initialize state structure */
7145 pState->pDevInsR3 = pDevIns;
7146 pState->pDevInsR0 = PDMDEVINS_2_R0PTR(pDevIns);
7147 pState->pDevInsRC = PDMDEVINS_2_RCPTR(pDevIns);
7148 pState->u16TxPktLen = 0;
7149 pState->fIPcsum = false;
7150 pState->fTCPcsum = false;
7151 pState->fIntMaskUsed = false;
7152 pState->fDelayInts = false;
7153 pState->fLocked = false;
7154 pState->u64AckedAt = 0;
7155 pState->led.u32Magic = PDMLED_MAGIC;
7156 pState->u32PktNo = 1;
7157
7158#ifdef E1K_INT_STATS
7159 pState->uStatInt = 0;
7160 pState->uStatIntTry = 0;
7161 pState->uStatIntLower = 0;
7162 pState->uStatIntDly = 0;
7163 pState->uStatDisDly = 0;
7164 pState->iStatIntLost = 0;
7165 pState->iStatIntLostOne = 0;
7166 pState->uStatIntLate = 0;
7167 pState->uStatIntMasked = 0;
7168 pState->uStatIntEarly = 0;
7169 pState->uStatIntRx = 0;
7170 pState->uStatIntTx = 0;
7171 pState->uStatIntICS = 0;
7172 pState->uStatIntRDTR = 0;
7173 pState->uStatIntRXDMT0 = 0;
7174 pState->uStatIntTXQE = 0;
7175 pState->uStatTxNoRS = 0;
7176 pState->uStatTxIDE = 0;
7177 pState->uStatTAD = 0;
7178 pState->uStatTID = 0;
7179 pState->uStatRAD = 0;
7180 pState->uStatRID = 0;
7181 pState->uStatRxFrm = 0;
7182 pState->uStatTxFrm = 0;
7183 pState->uStatDescCtx = 0;
7184 pState->uStatDescDat = 0;
7185 pState->uStatDescLeg = 0;
7186#endif /* E1K_INT_STATS */
7187
7188 /* Interfaces */
7189 pState->IBase.pfnQueryInterface = e1kQueryInterface;
7190
7191 pState->INetworkDown.pfnWaitReceiveAvail = e1kNetworkDown_WaitReceiveAvail;
7192 pState->INetworkDown.pfnReceive = e1kNetworkDown_Receive;
7193 pState->INetworkDown.pfnXmitPending = e1kNetworkDown_XmitPending;
7194
7195 pState->ILeds.pfnQueryStatusLed = e1kQueryStatusLed;
7196
7197 pState->INetworkConfig.pfnGetMac = e1kGetMac;
7198 pState->INetworkConfig.pfnGetLinkState = e1kGetLinkState;
7199 pState->INetworkConfig.pfnSetLinkState = e1kSetLinkState;
7200
7201 /* Initialize the EEPROM */
7202 pState->eeprom.init(pState->macConfigured);
7203
7204 /* Initialize internal PHY */
7205 Phy::init(&pState->phy, iInstance,
7206 pState->eChip == E1K_CHIP_82543GC?
7207 PHY_EPID_M881000 : PHY_EPID_M881011);
7208 Phy::setLinkStatus(&pState->phy, pState->fCableConnected);
7209
7210 rc = PDMDevHlpSSMRegisterEx(pDevIns, E1K_SAVEDSTATE_VERSION, sizeof(E1KSTATE), NULL,
7211 NULL, e1kLiveExec, NULL,
7212 e1kSavePrep, e1kSaveExec, NULL,
7213 e1kLoadPrep, e1kLoadExec, e1kLoadDone);
7214 if (RT_FAILURE(rc))
7215 return rc;
7216
7217 /* Initialize critical section */
7218 rc = PDMDevHlpCritSectInit(pDevIns, &pState->cs, RT_SRC_POS, "%s", pState->szInstance);
7219 if (RT_FAILURE(rc))
7220 return rc;
7221 rc = PDMDevHlpCritSectInit(pDevIns, &pState->csRx, RT_SRC_POS, "%sRX", pState->szInstance);
7222 if (RT_FAILURE(rc))
7223 return rc;
7224#ifdef E1K_WITH_TX_CS
7225 rc = PDMDevHlpCritSectInit(pDevIns, &pState->csTx, RT_SRC_POS, "%sTX", pState->szInstance);
7226 if (RT_FAILURE(rc))
7227 return rc;
7228#endif /* E1K_WITH_TX_CS */
7229
7230 /* Set PCI config registers */
7231 e1kConfigurePCI(pState->pciDevice, pState->eChip);
7232 /* Register PCI device */
7233 rc = PDMDevHlpPCIRegister(pDevIns, &pState->pciDevice);
7234 if (RT_FAILURE(rc))
7235 return rc;
7236
7237#ifdef E1K_WITH_MSI
7238 PDMMSIREG aMsiReg;
7239 aMsiReg.cMsiVectors = 1;
7240 aMsiReg.iMsiCapOffset = 0x80;
7241 aMsiReg.iMsiNextOffset = 0x0;
7242 aMsiReg.fMsi64bit = false;
7243 rc = PDMDevHlpPCIRegisterMsi(pDevIns, &aMsiReg);
7244 AssertRC(rc);
7245 if (RT_FAILURE (rc))
7246 return rc;
7247#endif
7248
7249
7250 /* Map our registers to memory space (region 0, see e1kConfigurePCI)*/
7251 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 0, E1K_MM_SIZE,
7252 PCI_ADDRESS_SPACE_MEM, e1kMap);
7253 if (RT_FAILURE(rc))
7254 return rc;
7255 /* Map our registers to IO space (region 2, see e1kConfigurePCI) */
7256 rc = PDMDevHlpPCIIORegionRegister(pDevIns, 2, E1K_IOPORT_SIZE,
7257 PCI_ADDRESS_SPACE_IO, e1kMap);
7258 if (RT_FAILURE(rc))
7259 return rc;
7260
7261 /* Create transmit queue */
7262 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7263 e1kTxQueueConsumer, true, "E1000-Xmit", &pState->pTxQueueR3);
7264 if (RT_FAILURE(rc))
7265 return rc;
7266 pState->pTxQueueR0 = PDMQueueR0Ptr(pState->pTxQueueR3);
7267 pState->pTxQueueRC = PDMQueueRCPtr(pState->pTxQueueR3);
7268
7269 /* Create the RX notifier signaller. */
7270 rc = PDMDevHlpQueueCreate(pDevIns, sizeof(PDMQUEUEITEMCORE), 1, 0,
7271 e1kCanRxQueueConsumer, true, "E1000-Rcv", &pState->pCanRxQueueR3);
7272 if (RT_FAILURE(rc))
7273 return rc;
7274 pState->pCanRxQueueR0 = PDMQueueR0Ptr(pState->pCanRxQueueR3);
7275 pState->pCanRxQueueRC = PDMQueueRCPtr(pState->pCanRxQueueR3);
7276
7277#ifdef E1K_USE_TX_TIMERS
7278 /* Create Transmit Interrupt Delay Timer */
7279 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxIntDelayTimer, pState,
7280 TMTIMER_FLAGS_NO_CRIT_SECT,
7281 "E1000 Transmit Interrupt Delay Timer", &pState->pTIDTimerR3);
7282 if (RT_FAILURE(rc))
7283 return rc;
7284 pState->pTIDTimerR0 = TMTimerR0Ptr(pState->pTIDTimerR3);
7285 pState->pTIDTimerRC = TMTimerRCPtr(pState->pTIDTimerR3);
7286
7287# ifndef E1K_NO_TAD
7288 /* Create Transmit Absolute Delay Timer */
7289 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kTxAbsDelayTimer, pState,
7290 TMTIMER_FLAGS_NO_CRIT_SECT,
7291 "E1000 Transmit Absolute Delay Timer", &pState->pTADTimerR3);
7292 if (RT_FAILURE(rc))
7293 return rc;
7294 pState->pTADTimerR0 = TMTimerR0Ptr(pState->pTADTimerR3);
7295 pState->pTADTimerRC = TMTimerRCPtr(pState->pTADTimerR3);
7296# endif /* E1K_NO_TAD */
7297#endif /* E1K_USE_TX_TIMERS */
7298
7299#ifdef E1K_USE_RX_TIMERS
7300 /* Create Receive Interrupt Delay Timer */
7301 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxIntDelayTimer, pState,
7302 TMTIMER_FLAGS_NO_CRIT_SECT,
7303 "E1000 Receive Interrupt Delay Timer", &pState->pRIDTimerR3);
7304 if (RT_FAILURE(rc))
7305 return rc;
7306 pState->pRIDTimerR0 = TMTimerR0Ptr(pState->pRIDTimerR3);
7307 pState->pRIDTimerRC = TMTimerRCPtr(pState->pRIDTimerR3);
7308
7309 /* Create Receive Absolute Delay Timer */
7310 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kRxAbsDelayTimer, pState,
7311 TMTIMER_FLAGS_NO_CRIT_SECT,
7312 "E1000 Receive Absolute Delay Timer", &pState->pRADTimerR3);
7313 if (RT_FAILURE(rc))
7314 return rc;
7315 pState->pRADTimerR0 = TMTimerR0Ptr(pState->pRADTimerR3);
7316 pState->pRADTimerRC = TMTimerRCPtr(pState->pRADTimerR3);
7317#endif /* E1K_USE_RX_TIMERS */
7318
7319 /* Create Late Interrupt Timer */
7320 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLateIntTimer, pState,
7321 TMTIMER_FLAGS_NO_CRIT_SECT,
7322 "E1000 Late Interrupt Timer", &pState->pIntTimerR3);
7323 if (RT_FAILURE(rc))
7324 return rc;
7325 pState->pIntTimerR0 = TMTimerR0Ptr(pState->pIntTimerR3);
7326 pState->pIntTimerRC = TMTimerRCPtr(pState->pIntTimerR3);
7327
7328 /* Create Link Up Timer */
7329 rc = PDMDevHlpTMTimerCreate(pDevIns, TMCLOCK_VIRTUAL, e1kLinkUpTimer, pState,
7330 TMTIMER_FLAGS_NO_CRIT_SECT,
7331 "E1000 Link Up Timer", &pState->pLUTimerR3);
7332 if (RT_FAILURE(rc))
7333 return rc;
7334 pState->pLUTimerR0 = TMTimerR0Ptr(pState->pLUTimerR3);
7335 pState->pLUTimerRC = TMTimerRCPtr(pState->pLUTimerR3);
7336
7337 /* Register the info item */
7338 char szTmp[20];
7339 RTStrPrintf(szTmp, sizeof(szTmp), "e1k%d", iInstance);
7340 PDMDevHlpDBGFInfoRegister(pDevIns, szTmp, "E1000 info.", e1kInfo);
7341
7342 /* Status driver */
7343 PPDMIBASE pBase;
7344 rc = PDMDevHlpDriverAttach(pDevIns, PDM_STATUS_LUN, &pState->IBase, &pBase, "Status Port");
7345 if (RT_FAILURE(rc))
7346 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the status LUN"));
7347 pState->pLedsConnector = PDMIBASE_QUERY_INTERFACE(pBase, PDMILEDCONNECTORS);
7348
7349 rc = PDMDevHlpDriverAttach(pDevIns, 0, &pState->IBase, &pState->pDrvBase, "Network Port");
7350 if (RT_SUCCESS(rc))
7351 {
7352 if (rc == VINF_NAT_DNS)
7353 {
7354 PDMDevHlpVMSetRuntimeError(pDevIns, 0 /*fFlags*/, "NoDNSforNAT",
7355 N_("A Domain Name Server (DNS) for NAT networking could not be determined. Ensure that your host is correctly connected to an ISP. If you ignore this warning the guest will not be able to perform nameserver lookups and it will probably observe delays if trying so"));
7356 }
7357 pState->pDrvR3 = PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMINETWORKUP);
7358 AssertMsgReturn(pState->pDrvR3, ("Failed to obtain the PDMINETWORKUP interface!\n"),
7359 VERR_PDM_MISSING_INTERFACE_BELOW);
7360
7361 pState->pDrvR0 = PDMIBASER0_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASER0), PDMINETWORKUP);
7362 pState->pDrvRC = PDMIBASERC_QUERY_INTERFACE(PDMIBASE_QUERY_INTERFACE(pState->pDrvBase, PDMIBASERC), PDMINETWORKUP);
7363 }
7364 else if ( rc == VERR_PDM_NO_ATTACHED_DRIVER
7365 || rc == VERR_PDM_CFG_MISSING_DRIVER_NAME)
7366 {
7367 /* No error! */
7368 E1kLog(("%s This adapter is not attached to any network!\n", INSTANCE(pState)));
7369 }
7370 else
7371 return PDMDEV_SET_ERROR(pDevIns, rc, N_("Failed to attach the network LUN"));
7372
7373 rc = RTSemEventCreate(&pState->hEventMoreRxDescAvail);
7374 if (RT_FAILURE(rc))
7375 return rc;
7376
7377 e1kHardReset(pState);
7378
7379#if defined(VBOX_WITH_STATISTICS)
7380 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatMMIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in RZ", "/Devices/E1k%d/MMIO/ReadRZ", iInstance);
7381 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatMMIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO reads in R3", "/Devices/E1k%d/MMIO/ReadR3", iInstance);
7382 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatMMIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in RZ", "/Devices/E1k%d/MMIO/WriteRZ", iInstance);
7383 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatMMIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling MMIO writes in R3", "/Devices/E1k%d/MMIO/WriteR3", iInstance);
7384 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatEEPROMRead, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM reads", "/Devices/E1k%d/EEPROM/Read", iInstance);
7385 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatEEPROMWrite, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling EEPROM writes", "/Devices/E1k%d/EEPROM/Write", iInstance);
7386 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOReadRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in RZ", "/Devices/E1k%d/IO/ReadRZ", iInstance);
7387 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOReadR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO reads in R3", "/Devices/E1k%d/IO/ReadR3", iInstance);
7388 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOWriteRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in RZ", "/Devices/E1k%d/IO/WriteRZ", iInstance);
7389 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIOWriteR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling IO writes in R3", "/Devices/E1k%d/IO/WriteR3", iInstance);
7390 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatLateIntTimer, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling late int timer", "/Devices/E1k%d/LateInt/Timer", iInstance);
7391 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatLateInts, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of late interrupts", "/Devices/E1k%d/LateInt/Occured", iInstance);
7392 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIntsRaised, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of raised interrupts", "/Devices/E1k%d/Interrupts/Raised", iInstance);
7393 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatIntsPrevented, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of prevented interrupts", "/Devices/E1k%d/Interrupts/Prevented", iInstance);
7394 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceive, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive", "/Devices/E1k%d/Receive/Total", iInstance);
7395 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceiveCRC, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive checksumming", "/Devices/E1k%d/Receive/CRC", iInstance);
7396 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceiveFilter, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive filtering", "/Devices/E1k%d/Receive/Filter", iInstance);
7397 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceiveStore, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling receive storing", "/Devices/E1k%d/Receive/Store", iInstance);
7398 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatRxOverflow, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_OCCURENCE, "Profiling RX overflows", "/Devices/E1k%d/RxOverflow", iInstance);
7399 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatRxOverflowWakeup, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Nr of RX overflow wakeups", "/Devices/E1k%d/RxOverflowWakeup", iInstance);
7400#endif /* VBOX_WITH_STATISTICS */
7401 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatReceiveBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data received", "/Devices/E1k%d/ReceiveBytes", iInstance);
7402#if defined(VBOX_WITH_STATISTICS)
7403 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in RZ", "/Devices/E1k%d/Transmit/TotalRZ", iInstance);
7404 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling transmits in R3", "/Devices/E1k%d/Transmit/TotalR3", iInstance);
7405#endif /* VBOX_WITH_STATISTICS */
7406 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitBytes, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES, "Amount of data transmitted", "/Devices/E1k%d/TransmitBytes", iInstance);
7407#if defined(VBOX_WITH_STATISTICS)
7408 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitSendRZ, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in RZ", "/Devices/E1k%d/Transmit/SendRZ", iInstance);
7409 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTransmitSendR3, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, "Profiling send transmit in R3", "/Devices/E1k%d/Transmit/SendR3", iInstance);
7410
7411 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescCtxNormal, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of normal context descriptors","/Devices/E1k%d/TxDesc/ContextNormal", iInstance);
7412 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescCtxTSE, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TSE context descriptors", "/Devices/E1k%d/TxDesc/ContextTSE", iInstance);
7413 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX data descriptors", "/Devices/E1k%d/TxDesc/Data", iInstance);
7414 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescLegacy, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX legacy descriptors", "/Devices/E1k%d/TxDesc/Legacy", iInstance);
7415 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxDescTSEData, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of TX TSE data descriptors", "/Devices/E1k%d/TxDesc/TSEData", iInstance);
7416 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxPathFallback, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Fallback TSE descriptor path", "/Devices/E1k%d/TxPath/Fallback", iInstance);
7417 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxPathGSO, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "GSO TSE descriptor path", "/Devices/E1k%d/TxPath/GSO", iInstance);
7418 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatTxPathRegular, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Regular descriptor path", "/Devices/E1k%d/TxPath/Normal", iInstance);
7419 PDMDevHlpSTAMRegisterF(pDevIns, &pState->StatPHYAccesses, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "Number of PHY accesses", "/Devices/E1k%d/PHYAccesses", iInstance);
7420#endif /* VBOX_WITH_STATISTICS */
7421
7422 return VINF_SUCCESS;
7423}
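/*
 * A hypothetical CFGM subtree consumed by the constructor above might look
 * roughly like this (key names come from the validation list at the top of
 * e1kConstruct; the values and the exact path are examples only):
 *
 *   Devices/e1000/0/Config/
 *     MAC            <bytes>  6-byte MAC address (required)
 *     CableConnected <bool>   1
 *     AdapterType    <u32>    index selecting 82540EM, 82543GC or 82545EM
 *     LineSpeed      <u32>    currently unused (see the @todo above)
 *     GCEnabled      <bool>   1 (default)
 *     R0Enabled      <bool>   1 (default)
 *     EthernetCRC    <bool>   1 (default)
 *     LinkUpDelay    <u32>    5000 (milliseconds, default)
 */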
7424
7425/**
7426 * The device registration structure.
7427 */
7428const PDMDEVREG g_DeviceE1000 =
7429{
7430 /* Structure version. PDM_DEVREG_VERSION defines the current version. */
7431 PDM_DEVREG_VERSION,
7432 /* Device name. */
7433 "e1000",
7434 /* Name of guest context module (no path).
7435 * Only evaluated if PDM_DEVREG_FLAGS_RC is set. */
7436 "VBoxDDGC.gc",
7437 /* Name of ring-0 module (no path).
7438 * Only evaluated if PDM_DEVREG_FLAGS_R0 is set. */
7439 "VBoxDDR0.r0",
7440 /* The description of the device. The UTF-8 string pointed to shall, like this structure,
7441 * remain unchanged from registration till VM destruction. */
7442 "Intel PRO/1000 MT Desktop Ethernet.\n",
7443
7444 /* Flags, combination of the PDM_DEVREG_FLAGS_* \#defines. */
7445 PDM_DEVREG_FLAGS_DEFAULT_BITS | PDM_DEVREG_FLAGS_RC | PDM_DEVREG_FLAGS_R0,
7446 /* Device class(es), combination of the PDM_DEVREG_CLASS_* \#defines. */
7447 PDM_DEVREG_CLASS_NETWORK,
7448 /* Maximum number of instances (per VM). */
7449 ~0U,
7450 /* Size of the instance data. */
7451 sizeof(E1KSTATE),
7452
7453 /* Construct instance - required. */
7454 e1kConstruct,
7455 /* Destruct instance - optional. */
7456 e1kDestruct,
7457 /* Relocation command - optional. */
7458 e1kRelocate,
7459 /* I/O Control interface - optional. */
7460 NULL,
7461 /* Power on notification - optional. */
7462 NULL,
7463 /* Reset notification - optional. */
7464 e1kReset,
7465 /* Suspend notification - optional. */
7466 e1kSuspend,
7467 /* Resume notification - optional. */
7468 NULL,
7469 /* Attach command - optional. */
7470 e1kAttach,
7471 /* Detach notification - optional. */
7472 e1kDetach,
7473 /* Query a LUN base interface - optional. */
7474 NULL,
7475 /* Init complete notification - optional. */
7476 NULL,
7477 /* Power off notification - optional. */
7478 e1kPowerOff,
7479 /* pfnSoftReset */
7480 NULL,
7481 /* u32VersionEnd */
7482 PDM_DEVREG_VERSION
7483};
7484
7485#endif /* IN_RING3 */
7486#endif /* !VBOX_DEVICE_STRUCT_TESTCASE */