VirtualBox

source: vbox/trunk/src/VBox/VMM/EM.cpp@ 13483

Last change on this file since 13483 was 13413, checked in by vboxsync, 16 years ago

VMM: spaces mostly.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 150.3 KB
Line 
1/* $Id: EM.cpp 13413 2008-10-20 23:15:20Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/** @page pg_em EM - The Execution Monitor / Manager
23 *
24 * The Execution Monitor/Manager is responsible for running the VM, scheduling
25 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
26 * Interpreted), and keeping the CPU states in sync. The function
27 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
28 * modes has different inner loops (emR3RawExecute, emR3HwAccExecute, and
29 * emR3RemExecute).
30 *
31 * The interpreted execution is only used to avoid switching between
32 * raw-mode/hwaccm and the recompiler when fielding virtualization traps/faults.
33 * The interpretation is thus implemented as part of EM.
34 *
35 * @see grp_em
36 */
37
38/*******************************************************************************
39* Header Files *
40*******************************************************************************/
41#define LOG_GROUP LOG_GROUP_EM
42#include <VBox/em.h>
43#include <VBox/vmm.h>
44#ifdef VBOX_WITH_VMI
45# include <VBox/parav.h>
46#endif
47#include <VBox/patm.h>
48#include <VBox/csam.h>
49#include <VBox/selm.h>
50#include <VBox/trpm.h>
51#include <VBox/iom.h>
52#include <VBox/dbgf.h>
53#include <VBox/pgm.h>
54#include <VBox/rem.h>
55#include <VBox/tm.h>
56#include <VBox/mm.h>
57#include <VBox/ssm.h>
58#include <VBox/pdmapi.h>
59#include <VBox/pdmcritsect.h>
60#include <VBox/pdmqueue.h>
61#include <VBox/hwaccm.h>
62#include <VBox/patm.h>
63#include "EMInternal.h"
64#include <VBox/vm.h>
65#include <VBox/cpumdis.h>
66#include <VBox/dis.h>
67#include <VBox/disopcode.h>
68#include <VBox/dbgf.h>
69
70#include <VBox/log.h>
71#include <iprt/thread.h>
72#include <iprt/assert.h>
73#include <iprt/asm.h>
74#include <iprt/semaphore.h>
75#include <iprt/string.h>
76#include <iprt/avl.h>
77#include <iprt/stream.h>
78#include <VBox/param.h>
79#include <VBox/err.h>
80
81
82/*******************************************************************************
83* Internal Functions *
84*******************************************************************************/
/* Saved-state callbacks: registered with SSMR3RegisterInternal() in EMR3Init() below. */
85static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
86static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
/* NOTE(review): the following are internal EM workers; bodies are outside this
 * view, so the role notes below are inferred from naming — confirm in the code. */
/* Presumably enters the debugger / handles a debug event for status code rc. */
87static int emR3Debug(PVM pVM, int rc);
/* Presumably single-steps one instruction in the recompiler (REM). */
88static int emR3RemStep(PVM pVM);
/* Presumably the inner execution loop for recompiled (REM) execution;
 * *pfFFDone likely reports whether force-flags were serviced. */
89static int emR3RemExecute(PVM pVM, bool *pfFFDone);
/* Presumably resumes hypervisor (raw-mode context) execution. */
90static int emR3RawResumeHyper(PVM pVM);
/* Presumably single-steps one instruction in raw mode. */
91static int emR3RawStep(PVM pVM);
/* Presumably dispatches a raw-mode execution status code (rc) for the guest context. */
92DECLINLINE(int) emR3RawHandleRC(PVM pVM, PCPUMCTX pCtx, int rc);
/* Presumably converts a status code into VM force-flag updates. */
93DECLINLINE(int) emR3RawUpdateForceFlag(PVM pVM, PCPUMCTX pCtx, int rc);
/* Presumably services raw-mode-specific forced actions before execution. */
94static int emR3RawForcedActions(PVM pVM, PCPUMCTX pCtx);
/* Presumably the inner execution loop for raw-mode execution (see @page above). */
95static int emR3RawExecute(PVM pVM, bool *pfFFDone);
/* Presumably interprets a single instruction; pszPrefix tags log output and
 * rcGC carries a GC status code (C++ default argument — this is a .cpp file). */
96DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, const char *pszPrefix, int rcGC = VINF_SUCCESS);
/* Presumably services high-priority forced actions after returning from execution. */
97static int emR3HighPriorityPostForcedActions(PVM pVM, int rc);
/* Presumably services pending forced actions (FFs) for the main loop. */
98static int emR3ForcedActions(PVM pVM, int rc);
/* Presumably handles a trap raised by the guest while in raw mode. */
99static int emR3RawGuestTrap(PVM pVM);
/* Presumably handles a trap taken inside PATM patch code; gcret is the GC return code. */
100static int emR3PatchTrap(PVM pVM, PCPUMCTX pCtx, int gcret);
101
102
103/**
104 * Initializes the EM.
105 *
106 * @returns VBox status code.
107 * @param pVM The VM to operate on.
108 */
109VMMR3DECL(int) EMR3Init(PVM pVM)
110{
111 LogFlow(("EMR3Init\n"));
112 /*
113 * Assert alignment and sizes.
114 */
115 AssertRelease(!(RT_OFFSETOF(VM, em.s) & 31));
116 AssertRelease(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
117 AssertReleaseMsg(sizeof(pVM->em.s.u.FatalLongJump) <= sizeof(pVM->em.s.u.achPaddingFatalLongJump),
118 ("%d bytes, padding %d\n", sizeof(pVM->em.s.u.FatalLongJump), sizeof(pVM->em.s.u.achPaddingFatalLongJump)));
119
120 /*
121 * Init the structure.
122 */
123 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
124 int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR3Enabled", &pVM->fRawR3Enabled);
125 if (VBOX_FAILURE(rc))
126 pVM->fRawR3Enabled = true;
127 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR0Enabled", &pVM->fRawR0Enabled);
128 if (VBOX_FAILURE(rc))
129 pVM->fRawR0Enabled = true;
130 Log(("EMR3Init: fRawR3Enabled=%d fRawR0Enabled=%d\n", pVM->fRawR3Enabled, pVM->fRawR0Enabled));
131 pVM->em.s.enmState = EMSTATE_NONE;
132 pVM->em.s.fForceRAW = false;
133
134 rc = CPUMQueryGuestCtxPtr(pVM, &pVM->em.s.pCtx);
135 AssertMsgRC(rc, ("CPUMQueryGuestCtxPtr -> %Vrc\n", rc));
136 pVM->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
137 AssertMsg(pVM->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
138
139 /*
140 * Saved state.
141 */
142 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
143 NULL, emR3Save, NULL,
144 NULL, emR3Load, NULL);
145 if (VBOX_FAILURE(rc))
146 return rc;
147
148 /*
149 * Statistics.
150 */
151#ifdef VBOX_WITH_STATISTICS
152 PEMSTATS pStats;
153 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
154 if (VBOX_FAILURE(rc))
155 return rc;
156 pVM->em.s.pStatsR3 = pStats;
157 pVM->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
158 pVM->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
159
160 STAM_REG(pVM, &pStats->StatRZEmulate, STAMTYPE_PROFILE, "/EM/RZ/Interpret", STAMUNIT_TICKS_PER_CALL, "Profiling of EMInterpretInstruction.");
161 STAM_REG(pVM, &pStats->StatR3Emulate, STAMTYPE_PROFILE, "/EM/R3/Interpret", STAMUNIT_TICKS_PER_CALL, "Profiling of EMInterpretInstruction.");
162
163 STAM_REG(pVM, &pStats->StatRZInterpretSucceeded, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success", STAMUNIT_OCCURENCES, "The number of times an instruction was successfully interpreted.");
164 STAM_REG(pVM, &pStats->StatR3InterpretSucceeded, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success", STAMUNIT_OCCURENCES, "The number of times an instruction was successfully interpreted.");
165
166 STAM_REG_USED(pVM, &pStats->StatRZAnd, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/And", STAMUNIT_OCCURENCES, "The number of times AND was successfully interpreted.");
167 STAM_REG_USED(pVM, &pStats->StatR3And, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/And", STAMUNIT_OCCURENCES, "The number of times AND was successfully interpreted.");
168 STAM_REG_USED(pVM, &pStats->StatRZAdd, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Add", STAMUNIT_OCCURENCES, "The number of times ADD was successfully interpreted.");
169 STAM_REG_USED(pVM, &pStats->StatR3Add, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Add", STAMUNIT_OCCURENCES, "The number of times ADD was successfully interpreted.");
170 STAM_REG_USED(pVM, &pStats->StatRZAdc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Adc", STAMUNIT_OCCURENCES, "The number of times ADC was successfully interpreted.");
171 STAM_REG_USED(pVM, &pStats->StatR3Adc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Adc", STAMUNIT_OCCURENCES, "The number of times ADC was successfully interpreted.");
172 STAM_REG_USED(pVM, &pStats->StatRZSub, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Sub", STAMUNIT_OCCURENCES, "The number of times SUB was successfully interpreted.");
173 STAM_REG_USED(pVM, &pStats->StatR3Sub, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Sub", STAMUNIT_OCCURENCES, "The number of times SUB was successfully interpreted.");
174 STAM_REG_USED(pVM, &pStats->StatRZCpuId, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/CpuId", STAMUNIT_OCCURENCES, "The number of times CPUID was successfully interpreted.");
175 STAM_REG_USED(pVM, &pStats->StatR3CpuId, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/CpuId", STAMUNIT_OCCURENCES, "The number of times CPUID was successfully interpreted.");
176 STAM_REG_USED(pVM, &pStats->StatRZDec, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Dec", STAMUNIT_OCCURENCES, "The number of times DEC was successfully interpreted.");
177 STAM_REG_USED(pVM, &pStats->StatR3Dec, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Dec", STAMUNIT_OCCURENCES, "The number of times DEC was successfully interpreted.");
178 STAM_REG_USED(pVM, &pStats->StatRZHlt, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Hlt", STAMUNIT_OCCURENCES, "The number of times HLT was successfully interpreted.");
179 STAM_REG_USED(pVM, &pStats->StatR3Hlt, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Hlt", STAMUNIT_OCCURENCES, "The number of times HLT was successfully interpreted.");
180 STAM_REG_USED(pVM, &pStats->StatRZInc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Inc", STAMUNIT_OCCURENCES, "The number of times INC was successfully interpreted.");
181 STAM_REG_USED(pVM, &pStats->StatR3Inc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Inc", STAMUNIT_OCCURENCES, "The number of times INC was successfully interpreted.");
182 STAM_REG_USED(pVM, &pStats->StatRZInvlPg, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Invlpg", STAMUNIT_OCCURENCES, "The number of times INVLPG was successfully interpreted.");
183 STAM_REG_USED(pVM, &pStats->StatR3InvlPg, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Invlpg", STAMUNIT_OCCURENCES, "The number of times INVLPG was successfully interpreted.");
184 STAM_REG_USED(pVM, &pStats->StatRZIret, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Iret", STAMUNIT_OCCURENCES, "The number of times IRET was successfully interpreted.");
185 STAM_REG_USED(pVM, &pStats->StatR3Iret, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Iret", STAMUNIT_OCCURENCES, "The number of times IRET was successfully interpreted.");
186 STAM_REG_USED(pVM, &pStats->StatRZLLdt, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/LLdt", STAMUNIT_OCCURENCES, "The number of times LLDT was successfully interpreted.");
187 STAM_REG_USED(pVM, &pStats->StatR3LLdt, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/LLdt", STAMUNIT_OCCURENCES, "The number of times LLDT was successfully interpreted.");
188 STAM_REG_USED(pVM, &pStats->StatRZLIdt, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/LIdt", STAMUNIT_OCCURENCES, "The number of times LIDT was successfully interpreted.");
189 STAM_REG_USED(pVM, &pStats->StatR3LIdt, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/LIdt", STAMUNIT_OCCURENCES, "The number of times LIDT was successfully interpreted.");
190 STAM_REG_USED(pVM, &pStats->StatRZLGdt, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/LGdt", STAMUNIT_OCCURENCES, "The number of times LGDT was successfully interpreted.");
191 STAM_REG_USED(pVM, &pStats->StatR3LGdt, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/LGdt", STAMUNIT_OCCURENCES, "The number of times LGDT was successfully interpreted.");
192 STAM_REG_USED(pVM, &pStats->StatRZMov, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Mov", STAMUNIT_OCCURENCES, "The number of times MOV was successfully interpreted.");
193 STAM_REG_USED(pVM, &pStats->StatR3Mov, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Mov", STAMUNIT_OCCURENCES, "The number of times MOV was successfully interpreted.");
194 STAM_REG_USED(pVM, &pStats->StatRZMovCRx, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/MovCRx", STAMUNIT_OCCURENCES, "The number of times MOV CRx was successfully interpreted.");
195 STAM_REG_USED(pVM, &pStats->StatR3MovCRx, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/MovCRx", STAMUNIT_OCCURENCES, "The number of times MOV CRx was successfully interpreted.");
196 STAM_REG_USED(pVM, &pStats->StatRZMovDRx, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/MovDRx", STAMUNIT_OCCURENCES, "The number of times MOV DRx was successfully interpreted.");
197 STAM_REG_USED(pVM, &pStats->StatR3MovDRx, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/MovDRx", STAMUNIT_OCCURENCES, "The number of times MOV DRx was successfully interpreted.");
198 STAM_REG_USED(pVM, &pStats->StatRZOr, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Or", STAMUNIT_OCCURENCES, "The number of times OR was successfully interpreted.");
199 STAM_REG_USED(pVM, &pStats->StatR3Or, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Or", STAMUNIT_OCCURENCES, "The number of times OR was successfully interpreted.");
200 STAM_REG_USED(pVM, &pStats->StatRZPop, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Pop", STAMUNIT_OCCURENCES, "The number of times POP was successfully interpreted.");
201 STAM_REG_USED(pVM, &pStats->StatR3Pop, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Pop", STAMUNIT_OCCURENCES, "The number of times POP was successfully interpreted.");
202 STAM_REG_USED(pVM, &pStats->StatRZRdtsc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Rdtsc", STAMUNIT_OCCURENCES, "The number of times RDTSC was successfully interpreted.");
203 STAM_REG_USED(pVM, &pStats->StatR3Rdtsc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Rdtsc", STAMUNIT_OCCURENCES, "The number of times RDTSC was successfully interpreted.");
204 STAM_REG_USED(pVM, &pStats->StatRZSti, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Sti", STAMUNIT_OCCURENCES, "The number of times STI was successfully interpreted.");
205 STAM_REG_USED(pVM, &pStats->StatR3Sti, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Sti", STAMUNIT_OCCURENCES, "The number of times STI was successfully interpreted.");
206 STAM_REG_USED(pVM, &pStats->StatRZXchg, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Xchg", STAMUNIT_OCCURENCES, "The number of times XCHG was successfully interpreted.");
207 STAM_REG_USED(pVM, &pStats->StatR3Xchg, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Xchg", STAMUNIT_OCCURENCES, "The number of times XCHG was successfully interpreted.");
208 STAM_REG_USED(pVM, &pStats->StatRZXor, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Xor", STAMUNIT_OCCURENCES, "The number of times XOR was successfully interpreted.");
209 STAM_REG_USED(pVM, &pStats->StatR3Xor, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Xor", STAMUNIT_OCCURENCES, "The number of times XOR was successfully interpreted.");
210 STAM_REG_USED(pVM, &pStats->StatRZMonitor, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Monitor", STAMUNIT_OCCURENCES, "The number of times MONITOR was successfully interpreted.");
211 STAM_REG_USED(pVM, &pStats->StatR3Monitor, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Monitor", STAMUNIT_OCCURENCES, "The number of times MONITOR was successfully interpreted.");
212 STAM_REG_USED(pVM, &pStats->StatRZMWait, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/MWait", STAMUNIT_OCCURENCES, "The number of times MWAIT was successfully interpreted.");
213 STAM_REG_USED(pVM, &pStats->StatR3MWait, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/MWait", STAMUNIT_OCCURENCES, "The number of times MWAIT was successfully interpreted.");
214 STAM_REG_USED(pVM, &pStats->StatRZBtr, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Btr", STAMUNIT_OCCURENCES, "The number of times BTR was successfully interpreted.");
215 STAM_REG_USED(pVM, &pStats->StatR3Btr, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Btr", STAMUNIT_OCCURENCES, "The number of times BTR was successfully interpreted.");
216 STAM_REG_USED(pVM, &pStats->StatRZBts, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Bts", STAMUNIT_OCCURENCES, "The number of times BTS was successfully interpreted.");
217 STAM_REG_USED(pVM, &pStats->StatR3Bts, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Bts", STAMUNIT_OCCURENCES, "The number of times BTS was successfully interpreted.");
218 STAM_REG_USED(pVM, &pStats->StatRZBtc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Btc", STAMUNIT_OCCURENCES, "The number of times BTC was successfully interpreted.");
219 STAM_REG_USED(pVM, &pStats->StatR3Btc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Btc", STAMUNIT_OCCURENCES, "The number of times BTC was successfully interpreted.");
220 STAM_REG_USED(pVM, &pStats->StatRZCmpXchg, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/CmpXchg", STAMUNIT_OCCURENCES, "The number of times CMPXCHG was successfully interpreted.");
221 STAM_REG_USED(pVM, &pStats->StatR3CmpXchg, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/CmpXchg", STAMUNIT_OCCURENCES, "The number of times CMPXCHG was successfully interpreted.");
222 STAM_REG_USED(pVM, &pStats->StatRZCmpXchg8b, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/CmpXchg8b", STAMUNIT_OCCURENCES, "The number of times CMPXCHG8B was successfully interpreted.");
223 STAM_REG_USED(pVM, &pStats->StatR3CmpXchg8b, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/CmpXchg8b", STAMUNIT_OCCURENCES, "The number of times CMPXCHG8B was successfully interpreted.");
224 STAM_REG_USED(pVM, &pStats->StatRZXAdd, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/XAdd", STAMUNIT_OCCURENCES, "The number of times XADD was successfully interpreted.");
225 STAM_REG_USED(pVM, &pStats->StatR3XAdd, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/XAdd", STAMUNIT_OCCURENCES, "The number of times XADD was successfully interpreted.");
226 STAM_REG_USED(pVM, &pStats->StatR3Rdmsr, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Rdmsr", STAMUNIT_OCCURENCES, "The number of times RDMSR was successfully interpreted.");
227 STAM_REG_USED(pVM, &pStats->StatRZRdmsr, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Rdmsr", STAMUNIT_OCCURENCES, "The number of times RDMSR was successfully interpreted.");
228 STAM_REG_USED(pVM, &pStats->StatR3Wrmsr, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Wrmsr", STAMUNIT_OCCURENCES, "The number of times WRMSR was successfully interpreted.");
229 STAM_REG_USED(pVM, &pStats->StatRZWrmsr, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Wrmsr", STAMUNIT_OCCURENCES, "The number of times WRMSR was successfully interpreted.");
230 STAM_REG_USED(pVM, &pStats->StatR3StosWD, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Stoswd", STAMUNIT_OCCURENCES, "The number of times STOSWD was successfully interpreted.");
231 STAM_REG_USED(pVM, &pStats->StatRZStosWD, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Stoswd", STAMUNIT_OCCURENCES, "The number of times STOSWD was successfully interpreted.");
232 STAM_REG_USED(pVM, &pStats->StatRZWbInvd, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/WbInvd", STAMUNIT_OCCURENCES, "The number of times WBINVD was successfully interpreted.");
233 STAM_REG_USED(pVM, &pStats->StatR3WbInvd, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/WbInvd", STAMUNIT_OCCURENCES, "The number of times WBINVD was successfully interpreted.");
234 STAM_REG_USED(pVM, &pStats->StatRZLmsw, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Success/Lmsw", STAMUNIT_OCCURENCES, "The number of times LMSW was successfully interpreted.");
235 STAM_REG_USED(pVM, &pStats->StatR3Lmsw, STAMTYPE_COUNTER, "/EM/R3/Interpret/Success/Lmsw", STAMUNIT_OCCURENCES, "The number of times LMSW was successfully interpreted.");
236
237 STAM_REG(pVM, &pStats->StatRZInterpretFailed, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed", STAMUNIT_OCCURENCES, "The number of times an instruction was not interpreted.");
238 STAM_REG(pVM, &pStats->StatR3InterpretFailed, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed", STAMUNIT_OCCURENCES, "The number of times an instruction was not interpreted.");
239
240 STAM_REG_USED(pVM, &pStats->StatRZFailedAnd, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/And", STAMUNIT_OCCURENCES, "The number of times AND was not interpreted.");
241 STAM_REG_USED(pVM, &pStats->StatR3FailedAnd, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/And", STAMUNIT_OCCURENCES, "The number of times AND was not interpreted.");
242 STAM_REG_USED(pVM, &pStats->StatRZFailedCpuId, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/CpuId", STAMUNIT_OCCURENCES, "The number of times CPUID was not interpreted.");
243 STAM_REG_USED(pVM, &pStats->StatR3FailedCpuId, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/CpuId", STAMUNIT_OCCURENCES, "The number of times CPUID was not interpreted.");
244 STAM_REG_USED(pVM, &pStats->StatRZFailedDec, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Dec", STAMUNIT_OCCURENCES, "The number of times DEC was not interpreted.");
245 STAM_REG_USED(pVM, &pStats->StatR3FailedDec, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Dec", STAMUNIT_OCCURENCES, "The number of times DEC was not interpreted.");
246 STAM_REG_USED(pVM, &pStats->StatRZFailedHlt, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Hlt", STAMUNIT_OCCURENCES, "The number of times HLT was not interpreted.");
247 STAM_REG_USED(pVM, &pStats->StatR3FailedHlt, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Hlt", STAMUNIT_OCCURENCES, "The number of times HLT was not interpreted.");
248 STAM_REG_USED(pVM, &pStats->StatRZFailedInc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Inc", STAMUNIT_OCCURENCES, "The number of times INC was not interpreted.");
249 STAM_REG_USED(pVM, &pStats->StatR3FailedInc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Inc", STAMUNIT_OCCURENCES, "The number of times INC was not interpreted.");
250 STAM_REG_USED(pVM, &pStats->StatRZFailedInvlPg, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/InvlPg", STAMUNIT_OCCURENCES, "The number of times INVLPG was not interpreted.");
251 STAM_REG_USED(pVM, &pStats->StatR3FailedInvlPg, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/InvlPg", STAMUNIT_OCCURENCES, "The number of times INVLPG was not interpreted.");
252 STAM_REG_USED(pVM, &pStats->StatRZFailedIret, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Iret", STAMUNIT_OCCURENCES, "The number of times IRET was not interpreted.");
253 STAM_REG_USED(pVM, &pStats->StatR3FailedIret, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Iret", STAMUNIT_OCCURENCES, "The number of times IRET was not interpreted.");
254 STAM_REG_USED(pVM, &pStats->StatRZFailedLLdt, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/LLdt", STAMUNIT_OCCURENCES, "The number of times LLDT was not interpreted.");
255 STAM_REG_USED(pVM, &pStats->StatR3FailedLLdt, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/LLdt", STAMUNIT_OCCURENCES, "The number of times LLDT was not interpreted.");
256 STAM_REG_USED(pVM, &pStats->StatRZFailedLIdt, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/LIdt", STAMUNIT_OCCURENCES, "The number of times LIDT was not interpreted.");
257 STAM_REG_USED(pVM, &pStats->StatR3FailedLIdt, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/LIdt", STAMUNIT_OCCURENCES, "The number of times LIDT was not interpreted.");
258 STAM_REG_USED(pVM, &pStats->StatRZFailedLGdt, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/LGdt", STAMUNIT_OCCURENCES, "The number of times LGDT was not interpreted.");
259 STAM_REG_USED(pVM, &pStats->StatR3FailedLGdt, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/LGdt", STAMUNIT_OCCURENCES, "The number of times LGDT was not interpreted.");
260 STAM_REG_USED(pVM, &pStats->StatRZFailedMov, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Mov", STAMUNIT_OCCURENCES, "The number of times MOV was not interpreted.");
261 STAM_REG_USED(pVM, &pStats->StatR3FailedMov, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Mov", STAMUNIT_OCCURENCES, "The number of times MOV was not interpreted.");
262 STAM_REG_USED(pVM, &pStats->StatRZFailedMovCRx, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/MovCRx", STAMUNIT_OCCURENCES, "The number of times MOV CRx was not interpreted.");
263 STAM_REG_USED(pVM, &pStats->StatR3FailedMovCRx, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/MovCRx", STAMUNIT_OCCURENCES, "The number of times MOV CRx was not interpreted.");
264 STAM_REG_USED(pVM, &pStats->StatRZFailedMovDRx, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/MovDRx", STAMUNIT_OCCURENCES, "The number of times MOV DRx was not interpreted.");
265 STAM_REG_USED(pVM, &pStats->StatR3FailedMovDRx, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/MovDRx", STAMUNIT_OCCURENCES, "The number of times MOV DRx was not interpreted.");
266 STAM_REG_USED(pVM, &pStats->StatRZFailedOr, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Or", STAMUNIT_OCCURENCES, "The number of times OR was not interpreted.");
267 STAM_REG_USED(pVM, &pStats->StatR3FailedOr, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Or", STAMUNIT_OCCURENCES, "The number of times OR was not interpreted.");
268 STAM_REG_USED(pVM, &pStats->StatRZFailedPop, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Pop", STAMUNIT_OCCURENCES, "The number of times POP was not interpreted.");
269 STAM_REG_USED(pVM, &pStats->StatR3FailedPop, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Pop", STAMUNIT_OCCURENCES, "The number of times POP was not interpreted.");
270 STAM_REG_USED(pVM, &pStats->StatRZFailedSti, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Sti", STAMUNIT_OCCURENCES, "The number of times STI was not interpreted.");
271 STAM_REG_USED(pVM, &pStats->StatR3FailedSti, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Sti", STAMUNIT_OCCURENCES, "The number of times STI was not interpreted.");
272 STAM_REG_USED(pVM, &pStats->StatRZFailedXchg, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Xchg", STAMUNIT_OCCURENCES, "The number of times XCHG was not interpreted.");
273 STAM_REG_USED(pVM, &pStats->StatR3FailedXchg, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Xchg", STAMUNIT_OCCURENCES, "The number of times XCHG was not interpreted.");
274 STAM_REG_USED(pVM, &pStats->StatRZFailedXor, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Xor", STAMUNIT_OCCURENCES, "The number of times XOR was not interpreted.");
275 STAM_REG_USED(pVM, &pStats->StatR3FailedXor, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Xor", STAMUNIT_OCCURENCES, "The number of times XOR was not interpreted.");
276 STAM_REG_USED(pVM, &pStats->StatRZFailedMonitor, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Monitor", STAMUNIT_OCCURENCES, "The number of times MONITOR was not interpreted.");
277 STAM_REG_USED(pVM, &pStats->StatR3FailedMonitor, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Monitor", STAMUNIT_OCCURENCES, "The number of times MONITOR was not interpreted.");
278 STAM_REG_USED(pVM, &pStats->StatRZFailedMWait, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/MWait", STAMUNIT_OCCURENCES, "The number of times MONITOR was not interpreted.");
279 STAM_REG_USED(pVM, &pStats->StatR3FailedMWait, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/MWait", STAMUNIT_OCCURENCES, "The number of times MONITOR was not interpreted.");
280 STAM_REG_USED(pVM, &pStats->StatRZFailedRdtsc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Rdtsc", STAMUNIT_OCCURENCES, "The number of times RDTSC was not interpreted.");
281 STAM_REG_USED(pVM, &pStats->StatR3FailedRdtsc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Rdtsc", STAMUNIT_OCCURENCES, "The number of times RDTSC was not interpreted.");
282 STAM_REG_USED(pVM, &pStats->StatRZFailedRdmsr, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Rdmsr", STAMUNIT_OCCURENCES, "The number of times RDMSR was not interpreted.");
283 STAM_REG_USED(pVM, &pStats->StatR3FailedRdmsr, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Rdmsr", STAMUNIT_OCCURENCES, "The number of times RDMSR was not interpreted.");
284 STAM_REG_USED(pVM, &pStats->StatRZFailedWrmsr, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Wrmsr", STAMUNIT_OCCURENCES, "The number of times WRMSR was not interpreted.");
285 STAM_REG_USED(pVM, &pStats->StatR3FailedWrmsr, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Wrmsr", STAMUNIT_OCCURENCES, "The number of times WRMSR was not interpreted.");
286 STAM_REG_USED(pVM, &pStats->StatRZFailedLmsw, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Lmsw", STAMUNIT_OCCURENCES, "The number of times LMSW was not interpreted.");
287 STAM_REG_USED(pVM, &pStats->StatR3FailedLmsw, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Lmsw", STAMUNIT_OCCURENCES, "The number of times LMSW was not interpreted.");
288
289 STAM_REG_USED(pVM, &pStats->StatRZFailedMisc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Misc", STAMUNIT_OCCURENCES, "The number of times some misc instruction was encountered.");
290 STAM_REG_USED(pVM, &pStats->StatR3FailedMisc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Misc", STAMUNIT_OCCURENCES, "The number of times some misc instruction was encountered.");
291 STAM_REG_USED(pVM, &pStats->StatRZFailedAdd, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Add", STAMUNIT_OCCURENCES, "The number of times ADD was not interpreted.");
292 STAM_REG_USED(pVM, &pStats->StatR3FailedAdd, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Add", STAMUNIT_OCCURENCES, "The number of times ADD was not interpreted.");
293 STAM_REG_USED(pVM, &pStats->StatRZFailedAdc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Adc", STAMUNIT_OCCURENCES, "The number of times ADC was not interpreted.");
294 STAM_REG_USED(pVM, &pStats->StatR3FailedAdc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Adc", STAMUNIT_OCCURENCES, "The number of times ADC was not interpreted.");
295 STAM_REG_USED(pVM, &pStats->StatRZFailedBtr, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Btr", STAMUNIT_OCCURENCES, "The number of times BTR was not interpreted.");
296 STAM_REG_USED(pVM, &pStats->StatR3FailedBtr, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Btr", STAMUNIT_OCCURENCES, "The number of times BTR was not interpreted.");
297 STAM_REG_USED(pVM, &pStats->StatRZFailedBts, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Bts", STAMUNIT_OCCURENCES, "The number of times BTS was not interpreted.");
298 STAM_REG_USED(pVM, &pStats->StatR3FailedBts, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Bts", STAMUNIT_OCCURENCES, "The number of times BTS was not interpreted.");
299 STAM_REG_USED(pVM, &pStats->StatRZFailedBtc, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Btc", STAMUNIT_OCCURENCES, "The number of times BTC was not interpreted.");
300 STAM_REG_USED(pVM, &pStats->StatR3FailedBtc, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Btc", STAMUNIT_OCCURENCES, "The number of times BTC was not interpreted.");
301 STAM_REG_USED(pVM, &pStats->StatRZFailedCli, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Cli", STAMUNIT_OCCURENCES, "The number of times CLI was not interpreted.");
302 STAM_REG_USED(pVM, &pStats->StatR3FailedCli, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Cli", STAMUNIT_OCCURENCES, "The number of times CLI was not interpreted.");
303 STAM_REG_USED(pVM, &pStats->StatRZFailedCmpXchg, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/CmpXchg", STAMUNIT_OCCURENCES, "The number of times CMPXCHG was not interpreted.");
304 STAM_REG_USED(pVM, &pStats->StatR3FailedCmpXchg, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/CmpXchg", STAMUNIT_OCCURENCES, "The number of times CMPXCHG was not interpreted.");
305 STAM_REG_USED(pVM, &pStats->StatRZFailedCmpXchg8b, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/CmpXchg8b", STAMUNIT_OCCURENCES, "The number of times CMPXCHG8B was not interpreted.");
306 STAM_REG_USED(pVM, &pStats->StatR3FailedCmpXchg8b, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/CmpXchg8b", STAMUNIT_OCCURENCES, "The number of times CMPXCHG8B was not interpreted.");
307 STAM_REG_USED(pVM, &pStats->StatRZFailedXAdd, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/XAdd", STAMUNIT_OCCURENCES, "The number of times XADD was not interpreted.");
308 STAM_REG_USED(pVM, &pStats->StatR3FailedXAdd, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/XAdd", STAMUNIT_OCCURENCES, "The number of times XADD was not interpreted.");
309 STAM_REG_USED(pVM, &pStats->StatRZFailedMovNTPS, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/MovNTPS", STAMUNIT_OCCURENCES, "The number of times MOVNTPS was not interpreted.");
310 STAM_REG_USED(pVM, &pStats->StatR3FailedMovNTPS, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/MovNTPS", STAMUNIT_OCCURENCES, "The number of times MOVNTPS was not interpreted.");
311 STAM_REG_USED(pVM, &pStats->StatRZFailedStosWD, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/StosWD", STAMUNIT_OCCURENCES, "The number of times STOSWD was not interpreted.");
312 STAM_REG_USED(pVM, &pStats->StatR3FailedStosWD, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/StosWD", STAMUNIT_OCCURENCES, "The number of times STOSWD was not interpreted.");
313 STAM_REG_USED(pVM, &pStats->StatRZFailedSub, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Sub", STAMUNIT_OCCURENCES, "The number of times SUB was not interpreted.");
314 STAM_REG_USED(pVM, &pStats->StatR3FailedSub, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Sub", STAMUNIT_OCCURENCES, "The number of times SUB was not interpreted.");
315 STAM_REG_USED(pVM, &pStats->StatRZFailedWbInvd, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/WbInvd", STAMUNIT_OCCURENCES, "The number of times WBINVD was not interpreted.");
316 STAM_REG_USED(pVM, &pStats->StatR3FailedWbInvd, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/WbInvd", STAMUNIT_OCCURENCES, "The number of times WBINVD was not interpreted.");
317
318 STAM_REG_USED(pVM, &pStats->StatRZFailedUserMode, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/UserMode", STAMUNIT_OCCURENCES, "The number of rejections because of CPL.");
319 STAM_REG_USED(pVM, &pStats->StatR3FailedUserMode, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/UserMode", STAMUNIT_OCCURENCES, "The number of rejections because of CPL.");
320 STAM_REG_USED(pVM, &pStats->StatRZFailedPrefix, STAMTYPE_COUNTER, "/EM/RZ/Interpret/Failed/Prefix", STAMUNIT_OCCURENCES, "The number of rejections because of prefix .");
321 STAM_REG_USED(pVM, &pStats->StatR3FailedPrefix, STAMTYPE_COUNTER, "/EM/R3/Interpret/Failed/Prefix", STAMUNIT_OCCURENCES, "The number of rejections because of prefix .");
322
323 STAM_REG_USED(pVM, &pStats->StatCli, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Cli", STAMUNIT_OCCURENCES, "Number of cli instructions.");
324 STAM_REG_USED(pVM, &pStats->StatSti, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Sti", STAMUNIT_OCCURENCES, "Number of sli instructions.");
325 STAM_REG_USED(pVM, &pStats->StatIn, STAMTYPE_COUNTER, "/EM/R3/PrivInst/In", STAMUNIT_OCCURENCES, "Number of in instructions.");
326 STAM_REG_USED(pVM, &pStats->StatOut, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Out", STAMUNIT_OCCURENCES, "Number of out instructions.");
327 STAM_REG_USED(pVM, &pStats->StatHlt, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Hlt", STAMUNIT_OCCURENCES, "Number of hlt instructions not handled in GC because of PATM.");
328 STAM_REG_USED(pVM, &pStats->StatInvlpg, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Invlpg", STAMUNIT_OCCURENCES, "Number of invlpg instructions.");
329 STAM_REG_USED(pVM, &pStats->StatMisc, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Misc", STAMUNIT_OCCURENCES, "Number of misc. instructions.");
330 STAM_REG_USED(pVM, &pStats->StatMovWriteCR[0], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov CR0, X", STAMUNIT_OCCURENCES, "Number of mov CR0 read instructions.");
331 STAM_REG_USED(pVM, &pStats->StatMovWriteCR[1], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov CR1, X", STAMUNIT_OCCURENCES, "Number of mov CR1 read instructions.");
332 STAM_REG_USED(pVM, &pStats->StatMovWriteCR[2], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov CR2, X", STAMUNIT_OCCURENCES, "Number of mov CR2 read instructions.");
333 STAM_REG_USED(pVM, &pStats->StatMovWriteCR[3], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov CR3, X", STAMUNIT_OCCURENCES, "Number of mov CR3 read instructions.");
334 STAM_REG_USED(pVM, &pStats->StatMovWriteCR[4], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov CR4, X", STAMUNIT_OCCURENCES, "Number of mov CR4 read instructions.");
335 STAM_REG_USED(pVM, &pStats->StatMovReadCR[0], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov X, CR0", STAMUNIT_OCCURENCES, "Number of mov CR0 write instructions.");
336 STAM_REG_USED(pVM, &pStats->StatMovReadCR[1], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov X, CR1", STAMUNIT_OCCURENCES, "Number of mov CR1 write instructions.");
337 STAM_REG_USED(pVM, &pStats->StatMovReadCR[2], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov X, CR2", STAMUNIT_OCCURENCES, "Number of mov CR2 write instructions.");
338 STAM_REG_USED(pVM, &pStats->StatMovReadCR[3], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov X, CR3", STAMUNIT_OCCURENCES, "Number of mov CR3 write instructions.");
339 STAM_REG_USED(pVM, &pStats->StatMovReadCR[4], STAMTYPE_COUNTER, "/EM/R3/PrivInst/Mov X, CR4", STAMUNIT_OCCURENCES, "Number of mov CR4 write instructions.");
340 STAM_REG_USED(pVM, &pStats->StatMovDRx, STAMTYPE_COUNTER, "/EM/R3/PrivInst/MovDRx", STAMUNIT_OCCURENCES, "Number of mov DRx instructions.");
341 STAM_REG_USED(pVM, &pStats->StatIret, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Iret", STAMUNIT_OCCURENCES, "Number of iret instructions.");
342 STAM_REG_USED(pVM, &pStats->StatMovLgdt, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Lgdt", STAMUNIT_OCCURENCES, "Number of lgdt instructions.");
343 STAM_REG_USED(pVM, &pStats->StatMovLidt, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Lidt", STAMUNIT_OCCURENCES, "Number of lidt instructions.");
344 STAM_REG_USED(pVM, &pStats->StatMovLldt, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Lldt", STAMUNIT_OCCURENCES, "Number of lldt instructions.");
345 STAM_REG_USED(pVM, &pStats->StatSysEnter, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Sysenter", STAMUNIT_OCCURENCES, "Number of sysenter instructions.");
346 STAM_REG_USED(pVM, &pStats->StatSysExit, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Sysexit", STAMUNIT_OCCURENCES, "Number of sysexit instructions.");
347 STAM_REG_USED(pVM, &pStats->StatSysCall, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Syscall", STAMUNIT_OCCURENCES, "Number of syscall instructions.");
348 STAM_REG_USED(pVM, &pStats->StatSysRet, STAMTYPE_COUNTER, "/EM/R3/PrivInst/Sysret", STAMUNIT_OCCURENCES, "Number of sysret instructions.");
349
350 STAM_REG(pVM, &pVM->em.s.StatTotalClis, STAMTYPE_COUNTER, "/EM/Cli/Total", STAMUNIT_OCCURENCES, "Total number of cli instructions executed.");
351 pVM->em.s.pCliStatTree = 0;
352#endif /* VBOX_WITH_STATISTICS */
353
354 /* these should be considered for release statistics. */
355 STAM_REL_REG(pVM, &pVM->em.s.StatForcedActions, STAMTYPE_PROFILE, "/PROF/EM/ForcedActions", STAMUNIT_TICKS_PER_CALL, "Profiling forced action execution.");
356 STAM_REG(pVM, &pVM->em.s.StatIOEmu, STAMTYPE_PROFILE, "/PROF/EM/Emulation/IO", STAMUNIT_TICKS_PER_CALL, "Profiling of emR3RawExecuteIOInstruction.");
357 STAM_REG(pVM, &pVM->em.s.StatPrivEmu, STAMTYPE_PROFILE, "/PROF/EM/Emulation/Priv", STAMUNIT_TICKS_PER_CALL, "Profiling of emR3RawPrivileged.");
358 STAM_REG(pVM, &pVM->em.s.StatMiscEmu, STAMTYPE_PROFILE, "/PROF/EM/Emulation/Misc", STAMUNIT_TICKS_PER_CALL, "Profiling of emR3RawExecuteInstruction.");
359
360 STAM_REL_REG(pVM, &pVM->em.s.StatHalted, STAMTYPE_PROFILE, "/PROF/EM/Halted", STAMUNIT_TICKS_PER_CALL, "Profiling halted state (VMR3WaitHalted).");
361 STAM_REG(pVM, &pVM->em.s.StatHwAccEntry, STAMTYPE_PROFILE, "/PROF/EM/HwAccEnter", STAMUNIT_TICKS_PER_CALL, "Profiling Hardware Accelerated Mode entry overhead.");
362 STAM_REG(pVM, &pVM->em.s.StatHwAccExec, STAMTYPE_PROFILE, "/PROF/EM/HwAccExec", STAMUNIT_TICKS_PER_CALL, "Profiling Hardware Accelerated Mode execution.");
363 STAM_REG(pVM, &pVM->em.s.StatREMEmu, STAMTYPE_PROFILE, "/PROF/EM/REMEmuSingle", STAMUNIT_TICKS_PER_CALL, "Profiling single instruction REM execution.");
364 STAM_REG(pVM, &pVM->em.s.StatREMExec, STAMTYPE_PROFILE, "/PROF/EM/REMExec", STAMUNIT_TICKS_PER_CALL, "Profiling REM execution.");
365 STAM_REG(pVM, &pVM->em.s.StatREMSync, STAMTYPE_PROFILE, "/PROF/EM/REMSync", STAMUNIT_TICKS_PER_CALL, "Profiling REM context syncing.");
366 STAM_REL_REG(pVM, &pVM->em.s.StatREMTotal, STAMTYPE_PROFILE, "/PROF/EM/REMTotal", STAMUNIT_TICKS_PER_CALL, "Profiling emR3RemExecute (excluding FFs).");
367 STAM_REG(pVM, &pVM->em.s.StatRAWEntry, STAMTYPE_PROFILE, "/PROF/EM/RAWEnter", STAMUNIT_TICKS_PER_CALL, "Profiling Raw Mode entry overhead.");
368 STAM_REG(pVM, &pVM->em.s.StatRAWExec, STAMTYPE_PROFILE, "/PROF/EM/RAWExec", STAMUNIT_TICKS_PER_CALL, "Profiling Raw Mode execution.");
369 STAM_REG(pVM, &pVM->em.s.StatRAWTail, STAMTYPE_PROFILE, "/PROF/EM/RAWTail", STAMUNIT_TICKS_PER_CALL, "Profiling Raw Mode tail overhead.");
370 STAM_REL_REG(pVM, &pVM->em.s.StatRAWTotal, STAMTYPE_PROFILE, "/PROF/EM/RAWTotal", STAMUNIT_TICKS_PER_CALL, "Profiling emR3RawExecute (excluding FFs).");
371 STAM_REL_REG(pVM, &pVM->em.s.StatTotal, STAMTYPE_PROFILE_ADV, "/PROF/EM/Total", STAMUNIT_TICKS_PER_CALL, "Profiling EMR3ExecuteVM.");
372
373
374 return VINF_SUCCESS;
375}
376
377
378/**
379 * Applies relocations to data and code managed by this
380 * component. This function will be called at init and
381 * whenever the VMM need to relocate it self inside the GC.
382 *
383 * @param pVM The VM.
384 */
385VMMR3DECL(void) EMR3Relocate(PVM pVM)
386{
387 LogFlow(("EMR3Relocate\n"));
388 if (pVM->em.s.pStatsR3)
389 pVM->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVM->em.s.pStatsR3);
390}
391
392
/**
 * Reset notification.
 *
 * Called when the VM is reset. The only EM state that needs resetting is
 * the force-raw-mode override; everything else is re-derived during
 * scheduling.
 *
 * @param   pVM     The VM handle.
 */
VMMR3DECL(void) EMR3Reset(PVM pVM)
{
    LogFlow(("EMR3Reset: \n"));
    pVM->em.s.fForceRAW = false;    /* drop the debug/testing raw-mode override */
}
403
404
/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * EM owns no separately allocated resources (the saved-state handler and
 * statistics are torn down by their own subsystems), so this only sanity
 * checks that initialization actually ran.
 *
 * @returns VBox status code.
 * @param   pVM     The VM to operate on.
 */
VMMR3DECL(int) EMR3Term(PVM pVM)
{
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

    return VINF_SUCCESS;
}
420
421
/**
 * Execute state save operation.
 *
 * The only EM item in the saved state is the force-raw-mode flag; it must
 * survive save/restore so scheduling decisions stay consistent.
 *
 * @returns VBox status code.
 * @param   pVM     VM Handle.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    return SSMR3PutBool(pSSM, pVM->em.s.fForceRAW);
}
433
434
/**
 * Execute state load operation.
 *
 * Counterpart of emR3Save: validates the unit version and reads back the
 * single saved boolean (fForceRAW), defaulting it to false if the read
 * fails so the VM can still be scheduled sanely.
 *
 * @returns VBox status code.
 * @param   pVM         VM Handle.
 * @param   pSSM        SSM operation handle.
 * @param   u32Version  Data layout version.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
{
    /*
     * Validate version.
     */
    if (u32Version != EM_SAVED_STATE_VERSION)
    {
        AssertMsgFailed(("emR3Load: Invalid version u32Version=%d (current %d)!\n", u32Version, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }

    /*
     * Load the saved state.
     */
    int rc = SSMR3GetBool(pSSM, &pVM->em.s.fForceRAW);
    if (VBOX_FAILURE(rc))
        pVM->em.s.fForceRAW = false;    /* best effort: fall back to normal scheduling */

    /* The cli statistics tree is runtime-only state and must not have been populated yet. */
    Assert(!pVM->em.s.pCliStatTree);
    return rc;
}
464
465
466/**
467 * Enables or disables a set of raw-mode execution modes.
468 *
469 * @returns VINF_SUCCESS on success.
470 * @returns VINF_RESCHEDULE if a rescheduling might be required.
471 * @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
472 *
473 * @param pVM The VM to operate on.
474 * @param enmMode The execution mode change.
475 * @thread The emulation thread.
476 */
477VMMR3DECL(int) EMR3RawSetMode(PVM pVM, EMRAWMODE enmMode)
478{
479 switch (enmMode)
480 {
481 case EMRAW_NONE:
482 pVM->fRawR3Enabled = false;
483 pVM->fRawR0Enabled = false;
484 break;
485 case EMRAW_RING3_ENABLE:
486 pVM->fRawR3Enabled = true;
487 break;
488 case EMRAW_RING3_DISABLE:
489 pVM->fRawR3Enabled = false;
490 break;
491 case EMRAW_RING0_ENABLE:
492 pVM->fRawR0Enabled = true;
493 break;
494 case EMRAW_RING0_DISABLE:
495 pVM->fRawR0Enabled = false;
496 break;
497 default:
498 AssertMsgFailed(("Invalid enmMode=%d\n", enmMode));
499 return VERR_INVALID_PARAMETER;
500 }
501 Log(("EMR3SetRawMode: fRawR3Enabled=%RTbool fRawR0Enabled=%RTbool\n",
502 pVM->fRawR3Enabled, pVM->fRawR0Enabled));
503 return pVM->em.s.enmState == EMSTATE_RAW ? VINF_EM_RESCHEDULE : VINF_SUCCESS;
504}
505
506
/**
 * Raise a fatal error.
 *
 * Safely terminate the VM with full state report and stuff. This function
 * will naturally never return.
 *
 * @param   pVM     VM handle.
 * @param   rc      VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVM pVM, int rc)
{
    /* Jump back to the setjmp point established in EMR3ExecuteVM, carrying rc. */
    longjmp(pVM->em.s.u.FatalLongJump, rc);
    /* Unreachable: longjmp does not return; a release assertion guards the impossible. */
    AssertReleaseMsgFailed(("longjmp returned!\n"));
}
521
522
523/**
524 * Gets the EM state name.
525 *
526 * @returns pointer to read only state name,
527 * @param enmState The state.
528 */
529VMMR3DECL(const char *) EMR3GetStateName(EMSTATE enmState)
530{
531 switch (enmState)
532 {
533 case EMSTATE_NONE: return "EMSTATE_NONE";
534 case EMSTATE_RAW: return "EMSTATE_RAW";
535 case EMSTATE_HWACC: return "EMSTATE_HWACC";
536 case EMSTATE_REM: return "EMSTATE_REM";
537 case EMSTATE_PARAV: return "EMSTATE_PARAV";
538 case EMSTATE_HALTED: return "EMSTATE_HALTED";
539 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
540 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
541 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
542 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
543 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
544 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
545 default: return "Unknown!";
546 }
547}
548
549
#ifdef VBOX_WITH_STATISTICS
/**
 * Just a braindead function to keep track of cli addresses.
 *
 * Looks the instruction address up in an AVL tree keyed by guest EIP,
 * lazily creating a per-address STAM counter on first sight, then bumps
 * both the per-address and the global cli counters.
 *
 * @param   pVM         VM handle.
 * @param   pInstrGC    The EIP of the cli instruction.
 */
static void emR3RecordCli(PVM pVM, RTGCPTR pInstrGC)
{
    PCLISTAT pRec;

    pRec = (PCLISTAT)RTAvlPVGet(&pVM->em.s.pCliStatTree, (AVLPVKEY)pInstrGC);
    if (!pRec)
    {
        /* New cli instruction; insert into the tree. */
        pRec = (PCLISTAT)MMR3HeapAllocZ(pVM, MM_TAG_EM, sizeof(*pRec));
        Assert(pRec);
        if (!pRec)
            return;     /* out of memory: silently skip the statistic */
        pRec->Core.Key = (AVLPVKEY)pInstrGC;

        /* Register a dedicated counter named after the instruction address. */
        char szCliStatName[32];
        RTStrPrintf(szCliStatName, sizeof(szCliStatName), "/EM/Cli/0x%VGv", pInstrGC);
        STAM_REG(pVM, &pRec->Counter, STAMTYPE_COUNTER, szCliStatName, STAMUNIT_OCCURENCES, "Number of times cli was executed.");

        /* Insert cannot collide since the Get above missed; NOREF keeps non-strict builds quiet. */
        bool fRc = RTAvlPVInsert(&pVM->em.s.pCliStatTree, &pRec->Core);
        Assert(fRc); NOREF(fRc);
    }
    STAM_COUNTER_INC(&pRec->Counter);
    STAM_COUNTER_INC(&pVM->em.s.StatTotalClis);
}
#endif /* VBOX_WITH_STATISTICS */
581
582
/**
 * Debug loop.
 *
 * Dispatches a debug-related status code to DBGF (or performs a single
 * step), then decides from DBGF's answer whether to keep looping, resume
 * execution, or terminate. Loops until a non-debug status is produced.
 *
 * @returns VBox status code for EM.
 * @param   pVM     VM handle.
 * @param   rc      Current EM VBox status code..
 */
static int emR3Debug(PVM pVM, int rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Vrc\n", rc));
        const int rcLast = rc;  /* kept so a detached debugger can re-surface hyper events below */

        /*
         * Debug related RC.
         */
        switch (rc)
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (    pVM->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    ||  pVM->em.s.enmState == EMSTATE_DEBUG_HYPER
                    ||  pVM->em.s.fForceRAW /* paranoia */)
                    rc = emR3RawStep(pVM);
                else
                {
                    Assert(pVM->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
                    rc = emR3RemStep(pVM);
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetGCAssertMsg1(pVM), VMMR3GetGCAssertMsg2(pVM));
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetGCAssertMsg1(pVM), VMMR3GetGCAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special errors code! */
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
        }

        /*
         * Process the result.
         *
         * NB: the do { } while (false) wrapper exists so a 'continue' below
         * can bail out of this inner switch and make the outer for(;;) loop
         * re-process the freshly produced rc.
         */
        do
        {
            switch (rc)
            {
                /*
                 * Continue the debugging loop.
                 */
                case VINF_EM_DBG_STEP:
                case VINF_EM_DBG_STOP:
                case VINF_EM_DBG_STEPPED:
                case VINF_EM_DBG_BREAKPOINT:
                case VINF_EM_DBG_HYPER_STEPPED:
                case VINF_EM_DBG_HYPER_BREAKPOINT:
                case VINF_EM_DBG_HYPER_ASSERTION:
                    break;

                /*
                 * Resuming execution (in some form) has to be done here if we got
                 * a hypervisor debug event.
                 */
                case VINF_SUCCESS:
                case VINF_EM_RESUME:
                case VINF_EM_SUSPEND:
                case VINF_EM_RESCHEDULE:
                case VINF_EM_RESCHEDULE_RAW:
                case VINF_EM_RESCHEDULE_REM:
                case VINF_EM_HALT:
                    if (pVM->em.s.enmState == EMSTATE_DEBUG_HYPER)
                    {
                        rc = emR3RawResumeHyper(pVM);
                        if (rc != VINF_SUCCESS && VBOX_SUCCESS(rc))
                            continue;   /* informational status: loop again with the new rc */
                    }
                    if (rc == VINF_SUCCESS)
                        rc = VINF_EM_RESCHEDULE;
                    return rc;

                /*
                 * The debugger isn't attached.
                 * We'll simply turn the thing off since that's the easiest thing to do.
                 */
                case VERR_DBGF_NOT_ATTACHED:
                    switch (rcLast)
                    {
                        case VINF_EM_DBG_HYPER_ASSERTION:
                        case VINF_EM_DBG_HYPER_STEPPED:
                        case VINF_EM_DBG_HYPER_BREAKPOINT:
                            return rcLast;  /* hyper events must reach the caller even without a debugger */
                    }
                    return VINF_EM_OFF;

                /*
                 * Status codes terminating the VM in one or another sense.
                 */
                case VINF_EM_TERMINATE:
                case VINF_EM_OFF:
                case VINF_EM_RESET:
                case VINF_EM_RAW_STALE_SELECTOR:
                case VINF_EM_RAW_IRET_TRAP:
                case VERR_TRPM_PANIC:
                case VERR_TRPM_DONT_PANIC:
                case VERR_INTERNAL_ERROR:
                    return rc;

                /*
                 * The rest is unexpected, and will keep us here.
                 */
                default:
                    AssertMsgFailed(("Unxpected rc %Vrc!\n", rc));
                    break;
            }
        } while (false);
    } /* debug for ever */
}
734
735
736/**
737 * Steps recompiled code.
738 *
739 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
740 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
741 *
742 * @param pVM VM handle.
743 */
744static int emR3RemStep(PVM pVM)
745{
746 LogFlow(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
747
748 /*
749 * Switch to REM, step instruction, switch back.
750 */
751 int rc = REMR3State(pVM, pVM->em.s.fREMFlushTBs);
752 if (VBOX_SUCCESS(rc))
753 {
754 rc = REMR3Step(pVM);
755 REMR3StateBack(pVM);
756 pVM->em.s.fREMFlushTBs = false;
757 }
758 LogFlow(("emR3RemStep: returns %Vrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
759 return rc;
760}
761
762
/**
 * Executes recompiled code.
 *
 * This function contains the recompiler version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
 *          VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         VM handle.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 *
 */
static int emR3RemExecute(PVM pVM, bool *pfFFDone)
{
#ifdef LOG_ENABLED
    PCPUMCTX pCtx = pVM->em.s.pCtx;
    uint32_t cpl = CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx));

    if (pCtx->eflags.Bits.u1VM)
        Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF));
    else
        Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0));
#endif
    STAM_REL_PROFILE_ADV_START(&pVM->em.s.StatREMTotal, a);

#if defined(VBOX_STRICT) && defined(DEBUG_bird)
    AssertMsg(   VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3|VM_FF_PGM_SYNC_CR3_NON_GLOBAL)
              || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVM)), /** @todo #1419 - get flat address. */
              ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
#endif

    /*
     * Spin till we get a forced action which returns anything but VINF_SUCCESS
     * or the REM suggests raw-mode execution.
     */
    *pfFFDone = false;
    bool    fInREMState = false;    /* true while REM holds the authoritative CPU state */
    int     rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Update REM state if not already in sync.
         */
        if (!fInREMState)
        {
            STAM_PROFILE_START(&pVM->em.s.StatREMSync, b);
            rc = REMR3State(pVM, pVM->em.s.fREMFlushTBs);
            STAM_PROFILE_STOP(&pVM->em.s.StatREMSync, b);
            if (VBOX_FAILURE(rc))
                break;
            fInREMState = true;
            pVM->em.s.fREMFlushTBs = false;

            /*
             * We might have missed the raising of VMREQ, TIMER and some other
             * important FFs while we were busy switching the state. So, check again.
             */
            if (VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_TIMER | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_RESET))
            {
                LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fForcedActions));
                goto l_REMDoForcedActions;
            }
        }


        /*
         * Execute REM.
         */
        STAM_PROFILE_START(&pVM->em.s.StatREMExec, c);
        rc = REMR3Run(pVM);
        STAM_PROFILE_STOP(&pVM->em.s.StatREMExec, c);


        /*
         * Deal with high priority post execution FFs before doing anything else.
         */
        if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK))
            rc = emR3HighPriorityPostForcedActions(pVM, rc);

        /*
         * Process the returned status code.
         * (Try keep this short! Call functions!)
         */
        if (rc != VINF_SUCCESS)
        {
            if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
                break;      /* EM status codes are handed back to the outer loop */
            if (rc != VINF_REM_INTERRUPED_FF)
            {
                /*
                 * Anything which is not known to us means an internal error
                 * and the termination of the VM!
                 */
                AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Vra\n", rc));
                break;
            }
        }


        /*
         * Check and execute forced actions.
         * Sync back the VM state before calling any of these.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPoll(pVM);
#endif
        if (VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK & ~(VM_FF_CSAM_PENDING_ACTION | VM_FF_CSAM_SCAN_PAGE)))
        {
l_REMDoForcedActions:
            /* FFs may run arbitrary R3 code, so hand the CPU state back first. */
            if (fInREMState)
            {
                STAM_PROFILE_START(&pVM->em.s.StatREMSync, d);
                REMR3StateBack(pVM);
                STAM_PROFILE_STOP(&pVM->em.s.StatREMSync, d);
                fInREMState = false;
            }
            STAM_REL_PROFILE_ADV_SUSPEND(&pVM->em.s.StatREMTotal, a);
            rc = emR3ForcedActions(pVM, rc);
            STAM_REL_PROFILE_ADV_RESUME(&pVM->em.s.StatREMTotal, a);
            if (    rc != VINF_SUCCESS
                &&  rc != VINF_EM_RESCHEDULE_REM)
            {
                *pfFFDone = true;
                break;
            }
        }

    } /* The Inner Loop, recompiled execution mode version. */


    /*
     * Returning. Sync back the VM state if required.
     */
    if (fInREMState)
    {
        STAM_PROFILE_START(&pVM->em.s.StatREMSync, e);
        REMR3StateBack(pVM);
        STAM_PROFILE_STOP(&pVM->em.s.StatREMSync, e);
    }

    STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatREMTotal, a);
    return rc;
}
908
909
910/**
911 * Resumes executing hypervisor after a debug event.
912 *
913 * This is kind of special since our current guest state is
914 * potentially out of sync.
915 *
916 * @returns VBox status code.
917 * @param pVM The VM handle.
918 */
919static int emR3RawResumeHyper(PVM pVM)
920{
921 int rc;
922 PCPUMCTX pCtx = pVM->em.s.pCtx;
923 Assert(pVM->em.s.enmState == EMSTATE_DEBUG_HYPER);
924 Log(("emR3RawResumeHyper: cs:eip=%RTsel:%RGr efl=%RGr\n", pCtx->cs, pCtx->eip, pCtx->eflags));
925
926 /*
927 * Resume execution.
928 */
929 CPUMRawEnter(pVM, NULL);
930 CPUMSetHyperEFlags(pVM, CPUMGetHyperEFlags(pVM) | X86_EFL_RF);
931 rc = VMMR3ResumeHyper(pVM);
932 Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr - returned from GC with rc=%Vrc\n", pCtx->cs, pCtx->eip, pCtx->eflags, rc));
933 rc = CPUMRawLeave(pVM, NULL, rc);
934 VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
935
936 /*
937 * Deal with the return code.
938 */
939 rc = emR3HighPriorityPostForcedActions(pVM, rc);
940 rc = emR3RawHandleRC(pVM, pCtx, rc);
941 rc = emR3RawUpdateForceFlag(pVM, pCtx, rc);
942 return rc;
943}
944
945
/**
 * Steps rawmode.
 *
 * Single steps either the guest (raw mode) or the hypervisor, depending on
 * the current debug state. Sets TF, runs until a non-trivial status comes
 * back, then clears TF again and funnels the status through the usual raw
 * mode handlers.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
static int emR3RawStep(PVM pVM)
{
    Assert(   pVM->em.s.enmState == EMSTATE_DEBUG_HYPER
           || pVM->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
           || pVM->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
    int rc;
    PCPUMCTX pCtx = pVM->em.s.pCtx;
    /* fGuest selects between guest and hypervisor context throughout. */
    bool fGuest = pVM->em.s.enmState != EMSTATE_DEBUG_HYPER;
#ifndef DEBUG_sandervl
    Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr\n", fGuest ? CPUMGetGuestCS(pVM) : CPUMGetHyperCS(pVM),
         fGuest ? CPUMGetGuestEIP(pVM) : CPUMGetHyperEIP(pVM), fGuest ? CPUMGetGuestEFlags(pVM) : CPUMGetHyperEFlags(pVM)));
#endif
    if (fGuest)
    {
        /*
         * Check vital forced actions, but ignore pending interrupts and timers.
         */
        if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            rc = emR3RawForcedActions(pVM, pCtx);
            if (VBOX_FAILURE(rc))
                return rc;
        }

        /*
         * Set flags for single stepping.
         */
        CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) | X86_EFL_TF | X86_EFL_RF);
    }
    else
        CPUMSetHyperEFlags(pVM, CPUMGetHyperEFlags(pVM) | X86_EFL_TF | X86_EFL_RF);

    /*
     * Single step.
     * We do not start time or anything, if anything we should just do a few nanoseconds.
     */
    CPUMRawEnter(pVM, NULL);
    do
    {
        if (pVM->em.s.enmState == EMSTATE_DEBUG_HYPER)
            rc = VMMR3ResumeHyper(pVM);
        else
            rc = VMMR3RawRunGC(pVM);
#ifndef DEBUG_sandervl
        Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr - GC rc %Vrc\n", fGuest ? CPUMGetGuestCS(pVM) : CPUMGetHyperCS(pVM),
             fGuest ? CPUMGetGuestEIP(pVM) : CPUMGetHyperEIP(pVM), fGuest ? CPUMGetGuestEFlags(pVM) : CPUMGetHyperEFlags(pVM), rc));
#endif
    } while (   rc == VINF_SUCCESS
             || rc == VINF_EM_RAW_INTERRUPT);   /* uninteresting exits: keep stepping */
    rc = CPUMRawLeave(pVM, NULL, rc);
    VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);

    /*
     * Make sure the trap flag is cleared.
     * (Too bad if the guest is trying to single step too.)
     */
    if (fGuest)
        CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) & ~X86_EFL_TF);
    else
        CPUMSetHyperEFlags(pVM, CPUMGetHyperEFlags(pVM) & ~X86_EFL_TF);

    /*
     * Deal with the return codes.
     */
    rc = emR3HighPriorityPostForcedActions(pVM, rc);
    rc = emR3RawHandleRC(pVM, pCtx, rc);
    rc = emR3RawUpdateForceFlag(pVM, pCtx, rc);
    return rc;
}
1021
1022
1023#ifdef DEBUG
1024
/**
 * Steps hardware accelerated mode.
 *
 * Debug-build counterpart of emR3RawStep for HWACC: sets TF, runs the
 * guest via VMMR3HwAccRunGC until a non-trivial status is returned, then
 * clears TF and pushes the status through the raw-mode RC handlers.
 *
 * @returns VBox status code.
 * @param   pVM     The VM handle.
 */
static int emR3HwAccStep(PVM pVM)
{
    Assert(pVM->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC);

    int rc;
    PCPUMCTX pCtx = pVM->em.s.pCtx;
    /* These sync FFs are serviced implicitly by hardware assisted mode. */
    VM_FF_CLEAR(pVM, (VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_TSS));

    /*
     * Check vital forced actions, but ignore pending interrupts and timers.
     */
    if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
    {
        rc = emR3RawForcedActions(pVM, pCtx);
        if (VBOX_FAILURE(rc))
            return rc;
    }
    /*
     * Set flags for single stepping.
     */
    CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) | X86_EFL_TF | X86_EFL_RF);

    /*
     * Single step.
     * We do not start time or anything, if anything we should just do a few nanoseconds.
     */
    do
    {
        rc = VMMR3HwAccRunGC(pVM);
    } while (   rc == VINF_SUCCESS
             || rc == VINF_EM_RAW_INTERRUPT);   /* uninteresting exits: keep stepping */
    VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);

    /*
     * Make sure the trap flag is cleared.
     * (Too bad if the guest is trying to single step too.)
     */
    CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) & ~X86_EFL_TF);

    /*
     * Deal with the return codes.
     */
    rc = emR3HighPriorityPostForcedActions(pVM, rc);
    rc = emR3RawHandleRC(pVM, pCtx, rc);
    rc = emR3RawUpdateForceFlag(pVM, pCtx, rc);
    return rc;
}
1078
1079
1080void emR3SingleStepExecRaw(PVM pVM, uint32_t cIterations)
1081{
1082 EMSTATE enmOldState = pVM->em.s.enmState;
1083
1084 pVM->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
1085
1086 Log(("Single step BEGIN:\n"));
1087 for (uint32_t i = 0; i < cIterations; i++)
1088 {
1089 DBGFR3PrgStep(pVM);
1090 DBGFR3DisasInstrCurrentLog(pVM, "RSS: ");
1091 emR3RawStep(pVM);
1092 }
1093 Log(("Single step END:\n"));
1094 CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) & ~X86_EFL_TF);
1095 pVM->em.s.enmState = enmOldState;
1096}
1097
1098
1099void emR3SingleStepExecHwAcc(PVM pVM, uint32_t cIterations)
1100{
1101 EMSTATE enmOldState = pVM->em.s.enmState;
1102
1103 pVM->em.s.enmState = EMSTATE_DEBUG_GUEST_HWACC;
1104
1105 Log(("Single step BEGIN:\n"));
1106 for (uint32_t i = 0; i < cIterations; i++)
1107 {
1108 DBGFR3PrgStep(pVM);
1109 DBGFR3DisasInstrCurrentLog(pVM, "RSS: ");
1110 emR3HwAccStep(pVM);
1111 }
1112 Log(("Single step END:\n"));
1113 CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) & ~X86_EFL_TF);
1114 pVM->em.s.enmState = enmOldState;
1115}
1116
1117
1118void emR3SingleStepExecRem(PVM pVM, uint32_t cIterations)
1119{
1120 EMSTATE enmOldState = pVM->em.s.enmState;
1121
1122 pVM->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1123
1124 Log(("Single step BEGIN:\n"));
1125 for (uint32_t i = 0; i < cIterations; i++)
1126 {
1127 DBGFR3PrgStep(pVM);
1128 DBGFR3DisasInstrCurrentLog(pVM, "RSS: ");
1129 emR3RemStep(pVM);
1130 }
1131 Log(("Single step END:\n"));
1132 CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) & ~X86_EFL_TF);
1133 pVM->em.s.enmState = enmOldState;
1134}
1135
1136#endif /* DEBUG */
1137
1138
1139/**
1140 * Executes one (or perhaps a few more) instruction(s).
1141 *
1142 * @returns VBox status code suitable for EM.
1143 *
1144 * @param pVM VM handle.
1145 * @param rcGC GC return code
1146 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
1147 * instruction and prefix the log output with this text.
1148 */
1149#ifdef LOG_ENABLED
1150static int emR3RawExecuteInstructionWorker(PVM pVM, int rcGC, const char *pszPrefix)
1151#else
1152static int emR3RawExecuteInstructionWorker(PVM pVM, int rcGC)
1153#endif
1154{
1155 PCPUMCTX pCtx = pVM->em.s.pCtx;
1156 int rc;
1157
1158 /*
1159 *
1160 * The simple solution is to use the recompiler.
1161 * The better solution is to disassemble the current instruction and
1162 * try handle as many as possible without using REM.
1163 *
1164 */
1165
1166#ifdef LOG_ENABLED
1167 /*
1168 * Disassemble the instruction if requested.
1169 */
1170 if (pszPrefix)
1171 {
1172 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
1173 DBGFR3DisasInstrCurrentLog(pVM, pszPrefix);
1174 }
1175#endif /* LOG_ENABLED */
1176
1177 /*
1178 * PATM is making life more interesting.
1179 * We cannot hand anything to REM which has an EIP inside patch code. So, we'll
1180 * tell PATM there is a trap in this code and have it take the appropriate actions
1181 * to allow us execute the code in REM.
1182 */
1183 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
1184 {
1185 Log(("emR3RawExecuteInstruction: In patch block. eip=%VRv\n", pCtx->eip));
1186
1187 RTGCPTR pNewEip;
1188 rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
1189 switch (rc)
1190 {
1191 /*
1192 * It's not very useful to emulate a single instruction and then go back to raw
1193 * mode; just execute the whole block until IF is set again.
1194 */
1195 case VINF_SUCCESS:
1196 Log(("emR3RawExecuteInstruction: Executing instruction starting at new address %VGv IF=%d VMIF=%x\n",
1197 pNewEip, pCtx->eflags.Bits.u1IF, pVM->em.s.pPatmGCState->uVMFlags));
1198 pCtx->eip = pNewEip;
1199 Assert(pCtx->eip);
1200
1201 if (pCtx->eflags.Bits.u1IF)
1202 {
1203 /*
1204 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1205 */
1206 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1207 return emR3RawExecuteInstruction(pVM, "PATCHIR");
1208 }
1209 else if (rcGC == VINF_PATM_PENDING_IRQ_AFTER_IRET)
1210 {
1211 /* special case: iret, that sets IF, detected a pending irq/event */
1212 return emR3RawExecuteInstruction(pVM, "PATCHIRET");
1213 }
1214 return VINF_EM_RESCHEDULE_REM;
1215
1216 /*
1217 * One instruction.
1218 */
1219 case VINF_PATCH_EMULATE_INSTR:
1220 Log(("emR3RawExecuteInstruction: Emulate patched instruction at %VGv IF=%d VMIF=%x\n",
1221 pNewEip, pCtx->eflags.Bits.u1IF, pVM->em.s.pPatmGCState->uVMFlags));
1222 pCtx->eip = pNewEip;
1223 return emR3RawExecuteInstruction(pVM, "PATCHIR");
1224
1225 /*
1226 * The patch was disabled, hand it to the REM.
1227 */
1228 case VERR_PATCH_DISABLED:
1229 Log(("emR3RawExecuteInstruction: Disabled patch -> new eip %VGv IF=%d VMIF=%x\n",
1230 pNewEip, pCtx->eflags.Bits.u1IF, pVM->em.s.pPatmGCState->uVMFlags));
1231 pCtx->eip = pNewEip;
1232 if (pCtx->eflags.Bits.u1IF)
1233 {
1234 /*
1235 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1236 */
1237 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1238 return emR3RawExecuteInstruction(pVM, "PATCHIR");
1239 }
1240 return VINF_EM_RESCHEDULE_REM;
1241
        /* Force continued patch execution; usually due to write monitored stack. */
1243 case VINF_PATCH_CONTINUE:
1244 return VINF_SUCCESS;
1245
1246 default:
1247 AssertReleaseMsgFailed(("Unknown return code %Vrc from PATMR3HandleTrap\n", rc));
1248 return VERR_INTERNAL_ERROR;
1249 }
1250 }
1251
1252#if 0
1253 /* Try our own instruction emulator before falling back to the recompiler. */
1254 DISCPUSTATE Cpu;
1255 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->rip, &Cpu, "GEN EMU");
1256 if (VBOX_SUCCESS(rc))
1257 {
1258 uint32_t size;
1259
1260 switch (Cpu.pCurInstr->opcode)
1261 {
1262 /* @todo we can do more now */
1263 case OP_MOV:
1264 case OP_AND:
1265 case OP_OR:
1266 case OP_XOR:
1267 case OP_POP:
1268 case OP_INC:
1269 case OP_DEC:
1270 case OP_XCHG:
1271 STAM_PROFILE_START(&pVM->em.s.StatMiscEmu, a);
1272 rc = EMInterpretInstructionCPU(pVM, &Cpu, CPUMCTX2CORE(pCtx), 0, &size);
1273 if (VBOX_SUCCESS(rc))
1274 {
1275 pCtx->rip += Cpu.opsize;
1276 STAM_PROFILE_STOP(&pVM->em.s.StatMiscEmu, a);
1277 return rc;
1278 }
1279 if (rc != VERR_EM_INTERPRETER)
1280 AssertMsgFailedReturn(("rc=%Vrc\n", rc), rc);
1281 STAM_PROFILE_STOP(&pVM->em.s.StatMiscEmu, a);
1282 break;
1283 }
1284 }
1285#endif /* 0 */
1286 STAM_PROFILE_START(&pVM->em.s.StatREMEmu, a);
1287 rc = REMR3EmulateInstruction(pVM);
1288 STAM_PROFILE_STOP(&pVM->em.s.StatREMEmu, a);
1289
1290 return rc;
1291}
1292
1293
1294/**
1295 * Executes one (or perhaps a few more) instruction(s).
1296 * This is just a wrapper for discarding pszPrefix in non-logging builds.
1297 *
1298 * @returns VBox status code suitable for EM.
1299 * @param pVM VM handle.
1300 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
1301 * instruction and prefix the log output with this text.
1302 * @param rcGC GC return code
1303 */
1304DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, const char *pszPrefix, int rcGC)
1305{
1306#ifdef LOG_ENABLED
1307 return emR3RawExecuteInstructionWorker(pVM, rcGC, pszPrefix);
1308#else
1309 return emR3RawExecuteInstructionWorker(pVM, rcGC);
1310#endif
1311}
1312
1313/**
1314 * Executes one (or perhaps a few more) IO instruction(s).
1315 *
1316 * @returns VBox status code suitable for EM.
1317 * @param pVM VM handle.
1318 */
1319int emR3RawExecuteIOInstruction(PVM pVM)
1320{
1321 int rc;
1322 PCPUMCTX pCtx = pVM->em.s.pCtx;
1323
1324 STAM_PROFILE_START(&pVM->em.s.StatIOEmu, a);
1325
1326 /** @todo probably we should fall back to the recompiler; otherwise we'll go back and forth between HC & GC
1327 * as io instructions tend to come in packages of more than one
1328 */
1329 DISCPUSTATE Cpu;
1330 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->rip, &Cpu, "IO EMU");
1331 if (VBOX_SUCCESS(rc))
1332 {
1333 rc = VINF_EM_RAW_EMULATE_INSTR;
1334
1335 if (!(Cpu.prefix & (PREFIX_REP | PREFIX_REPNE)))
1336 {
1337 switch (Cpu.pCurInstr->opcode)
1338 {
1339 case OP_IN:
1340 {
1341 STAM_COUNTER_INC(&pVM->em.s.CTX_SUFF(pStats)->StatIn);
1342 rc = IOMInterpretIN(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1343 break;
1344 }
1345
1346 case OP_OUT:
1347 {
1348 STAM_COUNTER_INC(&pVM->em.s.CTX_SUFF(pStats)->StatOut);
1349 rc = IOMInterpretOUT(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1350 break;
1351 }
1352 }
1353 }
1354 else if (Cpu.prefix & PREFIX_REP)
1355 {
1356 switch (Cpu.pCurInstr->opcode)
1357 {
1358 case OP_INSB:
1359 case OP_INSWD:
1360 {
1361 STAM_COUNTER_INC(&pVM->em.s.CTX_SUFF(pStats)->StatIn);
1362 rc = IOMInterpretINS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1363 break;
1364 }
1365
1366 case OP_OUTSB:
1367 case OP_OUTSWD:
1368 {
1369 STAM_COUNTER_INC(&pVM->em.s.CTX_SUFF(pStats)->StatOut);
1370 rc = IOMInterpretOUTS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1371 break;
1372 }
1373 }
1374 }
1375
1376 /*
1377 * Handled the I/O return codes.
1378 * (The unhandled cases end up with rc == VINF_EM_RAW_EMULATE_INSTR.)
1379 */
1380 if (IOM_SUCCESS(rc))
1381 {
1382 pCtx->rip += Cpu.opsize;
1383 STAM_PROFILE_STOP(&pVM->em.s.StatIOEmu, a);
1384 return rc;
1385 }
1386
1387 if (rc == VINF_EM_RAW_GUEST_TRAP)
1388 {
1389 STAM_PROFILE_STOP(&pVM->em.s.StatIOEmu, a);
1390 rc = emR3RawGuestTrap(pVM);
1391 return rc;
1392 }
1393 AssertMsg(rc != VINF_TRPM_XCPT_DISPATCHED, ("Handle VINF_TRPM_XCPT_DISPATCHED\n"));
1394
1395 if (VBOX_FAILURE(rc))
1396 {
1397 STAM_PROFILE_STOP(&pVM->em.s.StatIOEmu, a);
1398 return rc;
1399 }
1400 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RESCHEDULE_REM, ("rc=%Vrc\n", rc));
1401 }
1402 STAM_PROFILE_STOP(&pVM->em.s.StatIOEmu, a);
1403 return emR3RawExecuteInstruction(pVM, "IO: ");
1404}
1405
1406
1407/**
1408 * Handle a guest context trap.
1409 *
1410 * @returns VBox status code suitable for EM.
1411 * @param pVM VM handle.
1412 */
1413static int emR3RawGuestTrap(PVM pVM)
1414{
1415 PCPUMCTX pCtx = pVM->em.s.pCtx;
1416
1417 /*
1418 * Get the trap info.
1419 */
1420 uint8_t u8TrapNo;
1421 TRPMEVENT enmType;
1422 RTGCUINT uErrorCode;
1423 RTGCUINTPTR uCR2;
1424 int rc = TRPMQueryTrapAll(pVM, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
1425 if (VBOX_FAILURE(rc))
1426 {
1427 AssertReleaseMsgFailed(("No trap! (rc=%Vrc)\n", rc));
1428 return rc;
1429 }
1430
1431 /*
1432 * Traps can be directly forwarded in hardware accelerated mode.
1433 */
1434 if (HWACCMR3IsActive(pVM))
1435 {
1436#ifdef LOGGING_ENABLED
1437 DBGFR3InfoLog(pVM, "cpumguest", "Guest trap");
1438 DBGFR3DisasInstrCurrentLog(pVM, "Guest trap");
1439#endif
1440 return VINF_EM_RESCHEDULE_HWACC;
1441 }
1442
1443#if 1 /* Experimental: Review, disable if it causes trouble. */
1444 /*
1445 * Handle traps in patch code first.
1446 *
1447 * We catch a few of these cases in RC before returning to R3 (#PF, #GP, #BP)
1448 * but several traps isn't handled specially by TRPM in RC and we end up here
1449 * instead. One example is #DE.
1450 */
1451 uint32_t uCpl = CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx));
1452 if ( uCpl == 0
1453 && PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip))
1454 {
1455 LogFlow(("emR3RawGuestTrap: trap %#x in patch code; eip=%08x\n", u8TrapNo, pCtx->eip));
1456 return emR3PatchTrap(pVM, pCtx, rc);
1457 }
1458#endif
1459
1460 /*
1461 * If the guest gate is marked unpatched, then we will check again if we can patch it.
1462 * (This assumes that we've already tried and failed to dispatch the trap in
1463 * RC for the gates that already has been patched. Which is true for most high
1464 * volume traps, because these are handled specially, but not for odd ones like #DE.)
1465 */
1466 if (TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) == TRPM_INVALID_HANDLER)
1467 {
1468 CSAMR3CheckGates(pVM, u8TrapNo, 1);
1469 Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8TrapNo, TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) != TRPM_INVALID_HANDLER));
1470
1471 /* If it was successful, then we could go back to raw mode. */
1472 if (TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) != TRPM_INVALID_HANDLER)
1473 {
1474 /* Must check pending forced actions as our IDT or GDT might be out of sync. */
1475 rc = EMR3CheckRawForcedActions(pVM);
1476 AssertRCReturn(rc, rc);
1477
1478 TRPMERRORCODE enmError = uErrorCode != ~0U
1479 ? TRPM_TRAP_HAS_ERRORCODE
1480 : TRPM_TRAP_NO_ERRORCODE;
1481 rc = TRPMForwardTrap(pVM, CPUMCTX2CORE(pCtx), u8TrapNo, uErrorCode, enmError, TRPM_TRAP, -1);
1482 if (rc == VINF_SUCCESS /* Don't use VBOX_SUCCESS */)
1483 {
1484 TRPMResetTrap(pVM);
1485 return VINF_EM_RESCHEDULE_RAW;
1486 }
1487 AssertMsg(rc == VINF_EM_RAW_GUEST_TRAP, ("%Rrc\n", rc));
1488 }
1489 }
1490
1491 /*
1492 * Scan kernel code that traps; we might not get another chance.
1493 */
1494 /** @todo move this up before the dispatching? */
1495 if ( (pCtx->ss & X86_SEL_RPL) <= 1
1496 && !pCtx->eflags.Bits.u1VM)
1497 {
1498 Assert(!PATMIsPatchGCAddr(pVM, pCtx->eip));
1499 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
1500 }
1501
1502 /*
1503 * Trap specific handling.
1504 */
1505 if (u8TrapNo == 6) /* (#UD) Invalid opcode. */
1506 {
1507 /*
1508 * If MONITOR & MWAIT are supported, then interpret them here.
1509 */
1510 DISCPUSTATE cpu;
1511 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->rip, &cpu, "Guest Trap (#UD): ");
1512 if ( VBOX_SUCCESS(rc)
1513 && (cpu.pCurInstr->opcode == OP_MONITOR || cpu.pCurInstr->opcode == OP_MWAIT))
1514 {
1515 uint32_t u32Dummy, u32Features, u32ExtFeatures;
1516 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Features);
1517 if (u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR)
1518 {
1519 rc = TRPMResetTrap(pVM);
1520 AssertRC(rc);
1521
1522 uint32_t opsize;
1523 rc = EMInterpretInstructionCPU(pVM, &cpu, CPUMCTX2CORE(pCtx), 0, &opsize);
1524 if (VBOX_SUCCESS(rc))
1525 {
1526 pCtx->rip += cpu.opsize;
1527 return rc;
1528 }
1529 return emR3RawExecuteInstruction(pVM, "Monitor: ");
1530 }
1531 }
1532 }
1533 else if (u8TrapNo == 13) /* (#GP) Privileged exception */
1534 {
1535 /*
1536 * Handle I/O bitmap?
1537 */
1538 /** @todo We're not supposed to be here with a false guest trap concerning
1539 * I/O access. We can easily handle those in RC. */
1540 DISCPUSTATE cpu;
1541 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->rip, &cpu, "Guest Trap: ");
1542 if ( VBOX_SUCCESS(rc)
1543 && (cpu.pCurInstr->optype & OPTYPE_PORTIO))
1544 {
1545 /*
1546 * We should really check the TSS for the IO bitmap, but it's not like this
1547 * lazy approach really makes things worse.
1548 */
1549 rc = TRPMResetTrap(pVM);
1550 AssertRC(rc);
1551 return emR3RawExecuteInstruction(pVM, "IO Guest Trap: ");
1552 }
1553 }
1554
1555#ifdef LOG_ENABLED
1556 DBGFR3InfoLog(pVM, "cpumguest", "Guest trap");
1557 DBGFR3DisasInstrCurrentLog(pVM, "Guest trap");
1558
1559 /* Get guest page information. */
1560 uint64_t fFlags = 0;
1561 RTGCPHYS GCPhys = 0;
1562 int rc2 = PGMGstGetPage(pVM, uCR2, &fFlags, &GCPhys);
1563 Log(("emR3RawGuestTrap: cs:eip=%04x:%08x: trap=%02x err=%08x cr2=%08x cr0=%08x%s: Phys=%VGp fFlags=%08llx %s %s %s%s rc2=%d\n",
1564 pCtx->cs, pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0, (enmType == TRPM_SOFTWARE_INT) ? " software" : "", GCPhys, fFlags,
1565 fFlags & X86_PTE_P ? "P " : "NP", fFlags & X86_PTE_US ? "U" : "S",
1566 fFlags & X86_PTE_RW ? "RW" : "R0", fFlags & X86_PTE_G ? " G" : "", rc2));
1567#endif
1568
1569 /*
1570 * #PG has CR2.
1571 * (Because of stuff like above we must set CR2 in a delayed fashion.)
1572 */
1573 if (u8TrapNo == 14 /* #PG */)
1574 pCtx->cr2 = uCR2;
1575
1576 return VINF_EM_RESCHEDULE_REM;
1577}
1578
1579
1580/**
1581 * Handle a ring switch trap.
1582 * Need to do statistics and to install patches. The result is going to REM.
1583 *
1584 * @returns VBox status code suitable for EM.
1585 * @param pVM VM handle.
1586 */
1587int emR3RawRingSwitch(PVM pVM)
1588{
1589 int rc;
1590 DISCPUSTATE Cpu;
1591 PCPUMCTX pCtx = pVM->em.s.pCtx;
1592
1593 /*
1594 * sysenter, syscall & callgate
1595 */
1596 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->rip, &Cpu, "RSWITCH: ");
1597 if (VBOX_SUCCESS(rc))
1598 {
1599 if (Cpu.pCurInstr->opcode == OP_SYSENTER)
1600 {
1601 if (pCtx->SysEnter.cs != 0)
1602 {
1603 rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
1604 (SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0);
1605 if (VBOX_SUCCESS(rc))
1606 {
1607 DBGFR3DisasInstrCurrentLog(pVM, "Patched sysenter instruction");
1608 return VINF_EM_RESCHEDULE_RAW;
1609 }
1610 }
1611 }
1612
1613#ifdef VBOX_WITH_STATISTICS
1614 switch (Cpu.pCurInstr->opcode)
1615 {
1616 case OP_SYSENTER:
1617 STAM_COUNTER_INC(&pVM->em.s.CTX_SUFF(pStats)->StatSysEnter);
1618 break;
1619 case OP_SYSEXIT:
1620 STAM_COUNTER_INC(&pVM->em.s.CTX_SUFF(pStats)->StatSysExit);
1621 break;
1622 case OP_SYSCALL:
1623 STAM_COUNTER_INC(&pVM->em.s.CTX_SUFF(pStats)->StatSysCall);
1624 break;
1625 case OP_SYSRET:
1626 STAM_COUNTER_INC(&pVM->em.s.CTX_SUFF(pStats)->StatSysRet);
1627 break;
1628 }
1629#endif
1630 }
1631 else
1632 AssertRC(rc);
1633
1634 /* go to the REM to emulate a single instruction */
1635 return emR3RawExecuteInstruction(pVM, "RSWITCH: ");
1636}
1637
1638
1639/**
1640 * Handle a trap (\#PF or \#GP) in patch code
1641 *
1642 * @returns VBox status code suitable for EM.
1643 * @param pVM VM handle.
1644 * @param pCtx CPU context
1645 * @param gcret GC return code
1646 */
1647static int emR3PatchTrap(PVM pVM, PCPUMCTX pCtx, int gcret)
1648{
1649 uint8_t u8TrapNo;
1650 int rc;
1651 TRPMEVENT enmType;
1652 RTGCUINT uErrorCode;
1653 RTGCUINTPTR uCR2;
1654
1655 Assert(PATMIsPatchGCAddr(pVM, pCtx->eip));
1656
1657 if (gcret == VINF_PATM_PATCH_INT3)
1658 {
1659 u8TrapNo = 3;
1660 uCR2 = 0;
1661 uErrorCode = 0;
1662 }
1663 else if (gcret == VINF_PATM_PATCH_TRAP_GP)
1664 {
1665 /* No active trap in this case. Kind of ugly. */
1666 u8TrapNo = X86_XCPT_GP;
1667 uCR2 = 0;
1668 uErrorCode = 0;
1669 }
1670 else
1671 {
1672 rc = TRPMQueryTrapAll(pVM, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
1673 if (VBOX_FAILURE(rc))
1674 {
1675 AssertReleaseMsgFailed(("emR3PatchTrap: no trap! (rc=%Vrc) gcret=%Vrc\n", rc, gcret));
1676 return rc;
1677 }
1678 /* Reset the trap as we'll execute the original instruction again. */
1679 TRPMResetTrap(pVM);
1680 }
1681
1682 /*
1683 * Deal with traps inside patch code.
1684 * (This code won't run outside GC.)
1685 */
1686 if (u8TrapNo != 1)
1687 {
1688#ifdef LOG_ENABLED
1689 DBGFR3InfoLog(pVM, "cpumguest", "Trap in patch code");
1690 DBGFR3DisasInstrCurrentLog(pVM, "Patch code");
1691
1692 DISCPUSTATE Cpu;
1693 int rc;
1694
1695 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->eip, &Cpu, "Patch code: ");
1696 if ( VBOX_SUCCESS(rc)
1697 && Cpu.pCurInstr->opcode == OP_IRET)
1698 {
1699 uint32_t eip, selCS, uEFlags;
1700
1701 /* Iret crashes are bad as we have already changed the flags on the stack */
1702 rc = PGMPhysSimpleReadGCPtr(pVM, &eip, pCtx->esp, 4);
1703 rc |= PGMPhysSimpleReadGCPtr(pVM, &selCS, pCtx->esp+4, 4);
1704 rc |= PGMPhysSimpleReadGCPtr(pVM, &uEFlags, pCtx->esp+8, 4);
1705 if (rc == VINF_SUCCESS)
1706 {
1707 if ( (uEFlags & X86_EFL_VM)
1708 || (selCS & X86_SEL_RPL) == 3)
1709 {
1710 uint32_t selSS, esp;
1711
1712 rc |= PGMPhysSimpleReadGCPtr(pVM, &esp, pCtx->esp + 12, 4);
1713 rc |= PGMPhysSimpleReadGCPtr(pVM, &selSS, pCtx->esp + 16, 4);
1714
1715 if (uEFlags & X86_EFL_VM)
1716 {
1717 uint32_t selDS, selES, selFS, selGS;
1718 rc = PGMPhysSimpleReadGCPtr(pVM, &selES, pCtx->esp + 20, 4);
1719 rc |= PGMPhysSimpleReadGCPtr(pVM, &selDS, pCtx->esp + 24, 4);
1720 rc |= PGMPhysSimpleReadGCPtr(pVM, &selFS, pCtx->esp + 28, 4);
1721 rc |= PGMPhysSimpleReadGCPtr(pVM, &selGS, pCtx->esp + 32, 4);
1722 if (rc == VINF_SUCCESS)
1723 {
1724 Log(("Patch code: IRET->VM stack frame: return address %04X:%VGv eflags=%08x ss:esp=%04X:%VGv\n", selCS, eip, uEFlags, selSS, esp));
1725 Log(("Patch code: IRET->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
1726 }
1727 }
1728 else
1729 Log(("Patch code: IRET stack frame: return address %04X:%VGv eflags=%08x ss:esp=%04X:%VGv\n", selCS, eip, uEFlags, selSS, esp));
1730 }
1731 else
1732 Log(("Patch code: IRET stack frame: return address %04X:%VGv eflags=%08x\n", selCS, eip, uEFlags));
1733 }
1734 }
1735#endif /* LOG_ENABLED */
1736 Log(("emR3PatchTrap: in patch: eip=%08x: trap=%02x err=%08x cr2=%08x cr0=%08x\n",
1737 pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0));
1738
1739 RTGCPTR pNewEip;
1740 rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
1741 switch (rc)
1742 {
1743 /*
1744 * Execute the faulting instruction.
1745 */
1746 case VINF_SUCCESS:
1747 {
1748 /** @todo execute a whole block */
1749 Log(("emR3PatchTrap: Executing faulting instruction at new address %VGv\n", pNewEip));
1750 if (!(pVM->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
1751 Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));
1752
1753 pCtx->eip = pNewEip;
1754 AssertRelease(pCtx->eip);
1755
1756 if (pCtx->eflags.Bits.u1IF)
1757 {
1758 /* Windows XP lets irets fault intentionally and then takes action based on the opcode; an
1759 * int3 patch overwrites it and leads to blue screens. Remove the patch in this case.
1760 */
1761 if ( u8TrapNo == X86_XCPT_GP
1762 && PATMIsInt3Patch(pVM, pCtx->eip, NULL, NULL))
1763 {
1764 /** @todo move to PATMR3HandleTrap */
1765 Log(("Possible Windows XP iret fault at %VGv\n", pCtx->eip));
1766 PATMR3RemovePatch(pVM, pCtx->eip);
1767 }
1768
1769 /** @todo Knoppix 5 regression when returning VINF_SUCCESS here and going back to raw mode. */
1770 /* Note: possibly because a reschedule is required (e.g. iret to V86 code) */
1771
1772 return emR3RawExecuteInstruction(pVM, "PATCHIR");
1773 /* Interrupts are enabled; just go back to the original instruction.
1774 return VINF_SUCCESS; */
1775 }
1776 return VINF_EM_RESCHEDULE_REM;
1777 }
1778
1779 /*
1780 * One instruction.
1781 */
1782 case VINF_PATCH_EMULATE_INSTR:
1783 Log(("emR3PatchTrap: Emulate patched instruction at %VGv IF=%d VMIF=%x\n",
1784 pNewEip, pCtx->eflags.Bits.u1IF, pVM->em.s.pPatmGCState->uVMFlags));
1785 pCtx->eip = pNewEip;
1786 AssertRelease(pCtx->eip);
1787 return emR3RawExecuteInstruction(pVM, "PATCHEMUL: ");
1788
1789 /*
1790 * The patch was disabled, hand it to the REM.
1791 */
1792 case VERR_PATCH_DISABLED:
1793 if (!(pVM->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
1794 Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));
1795 pCtx->eip = pNewEip;
1796 AssertRelease(pCtx->eip);
1797
1798 if (pCtx->eflags.Bits.u1IF)
1799 {
1800 /*
1801 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1802 */
1803 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1804 return emR3RawExecuteInstruction(pVM, "PATCHIR");
1805 }
1806 return VINF_EM_RESCHEDULE_REM;
1807
1808 /* Force continued patch exection; usually due to write monitored stack. */
1809 case VINF_PATCH_CONTINUE:
1810 return VINF_SUCCESS;
1811
1812 /*
1813 * Anything else is *fatal*.
1814 */
1815 default:
1816 AssertReleaseMsgFailed(("Unknown return code %Vrc from PATMR3HandleTrap!\n", rc));
1817 return VERR_INTERNAL_ERROR;
1818 }
1819 }
1820 return VINF_SUCCESS;
1821}
1822
1823
1824/**
1825 * Handle a privileged instruction.
1826 *
1827 * @returns VBox status code suitable for EM.
1828 * @param pVM VM handle.
1829 */
1830int emR3RawPrivileged(PVM pVM)
1831{
1832 STAM_PROFILE_START(&pVM->em.s.StatPrivEmu, a);
1833 PCPUMCTX pCtx = pVM->em.s.pCtx;
1834
1835 Assert(!pCtx->eflags.Bits.u1VM);
1836
1837 if (PATMIsEnabled(pVM))
1838 {
1839 /*
1840 * Check if in patch code.
1841 */
1842 if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
1843 {
1844#ifdef LOG_ENABLED
1845 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
1846#endif
1847 AssertMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", pCtx->eip));
1848 return VERR_EM_RAW_PATCH_CONFLICT;
1849 }
1850 if ( (pCtx->ss & X86_SEL_RPL) == 0
1851 && !pCtx->eflags.Bits.u1VM
1852 && !PATMIsPatchGCAddr(pVM, pCtx->eip))
1853 {
1854 int rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
1855 (SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0);
1856 if (VBOX_SUCCESS(rc))
1857 {
1858#ifdef LOG_ENABLED
1859 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
1860#endif
1861 DBGFR3DisasInstrCurrentLog(pVM, "Patched privileged instruction");
1862 return VINF_SUCCESS;
1863 }
1864 }
1865 }
1866
1867#ifdef LOG_ENABLED
1868 if (!PATMIsPatchGCAddr(pVM, pCtx->eip))
1869 {
1870 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
1871 DBGFR3DisasInstrCurrentLog(pVM, "Privileged instr: ");
1872 }
1873#endif
1874
1875 /*
1876 * Instruction statistics and logging.
1877 */
1878 DISCPUSTATE Cpu;
1879 int rc;
1880
1881 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->rip, &Cpu, "PRIV: ");
1882 if (VBOX_SUCCESS(rc))
1883 {
1884#ifdef VBOX_WITH_STATISTICS
1885 PEMSTATS pStats = pVM->em.s.CTX_SUFF(pStats);
1886 switch (Cpu.pCurInstr->opcode)
1887 {
1888 case OP_INVLPG:
1889 STAM_COUNTER_INC(&pStats->StatInvlpg);
1890 break;
1891 case OP_IRET:
1892 STAM_COUNTER_INC(&pStats->StatIret);
1893 break;
1894 case OP_CLI:
1895 STAM_COUNTER_INC(&pStats->StatCli);
1896 emR3RecordCli(pVM, pCtx->rip);
1897 break;
1898 case OP_STI:
1899 STAM_COUNTER_INC(&pStats->StatSti);
1900 break;
1901 case OP_INSB:
1902 case OP_INSWD:
1903 case OP_IN:
1904 case OP_OUTSB:
1905 case OP_OUTSWD:
1906 case OP_OUT:
1907 AssertMsgFailed(("Unexpected privileged exception due to port IO\n"));
1908 break;
1909
1910 case OP_MOV_CR:
1911 if (Cpu.param1.flags & USE_REG_GEN32)
1912 {
1913 //read
1914 Assert(Cpu.param2.flags & USE_REG_CR);
1915 Assert(Cpu.param2.base.reg_ctrl <= USE_REG_CR4);
1916 STAM_COUNTER_INC(&pStats->StatMovReadCR[Cpu.param2.base.reg_ctrl]);
1917 }
1918 else
1919 {
1920 //write
1921 Assert(Cpu.param1.flags & USE_REG_CR);
1922 Assert(Cpu.param1.base.reg_ctrl <= USE_REG_CR4);
1923 STAM_COUNTER_INC(&pStats->StatMovWriteCR[Cpu.param1.base.reg_ctrl]);
1924 }
1925 break;
1926
1927 case OP_MOV_DR:
1928 STAM_COUNTER_INC(&pStats->StatMovDRx);
1929 break;
1930 case OP_LLDT:
1931 STAM_COUNTER_INC(&pStats->StatMovLldt);
1932 break;
1933 case OP_LIDT:
1934 STAM_COUNTER_INC(&pStats->StatMovLidt);
1935 break;
1936 case OP_LGDT:
1937 STAM_COUNTER_INC(&pStats->StatMovLgdt);
1938 break;
1939 case OP_SYSENTER:
1940 STAM_COUNTER_INC(&pStats->StatSysEnter);
1941 break;
1942 case OP_SYSEXIT:
1943 STAM_COUNTER_INC(&pStats->StatSysExit);
1944 break;
1945 case OP_SYSCALL:
1946 STAM_COUNTER_INC(&pStats->StatSysCall);
1947 break;
1948 case OP_SYSRET:
1949 STAM_COUNTER_INC(&pStats->StatSysRet);
1950 break;
1951 case OP_HLT:
1952 STAM_COUNTER_INC(&pStats->StatHlt);
1953 break;
1954 default:
1955 STAM_COUNTER_INC(&pStats->StatMisc);
1956 Log4(("emR3RawPrivileged: opcode=%d\n", Cpu.pCurInstr->opcode));
1957 break;
1958 }
1959#endif /* VBOX_WITH_STATISTICS */
1960 if ( (pCtx->ss & X86_SEL_RPL) == 0
1961 && !pCtx->eflags.Bits.u1VM
1962 && SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT)
1963 {
1964 uint32_t size;
1965
1966 STAM_PROFILE_START(&pVM->em.s.StatPrivEmu, a);
1967 switch (Cpu.pCurInstr->opcode)
1968 {
1969 case OP_CLI:
1970 pCtx->eflags.u32 &= ~X86_EFL_IF;
1971 Assert(Cpu.opsize == 1);
1972 pCtx->rip += Cpu.opsize;
1973 STAM_PROFILE_STOP(&pVM->em.s.StatPrivEmu, a);
1974 return VINF_EM_RESCHEDULE_REM; /* must go to the recompiler now! */
1975
1976 case OP_STI:
1977 pCtx->eflags.u32 |= X86_EFL_IF;
1978 EMSetInhibitInterruptsPC(pVM, pCtx->rip + Cpu.opsize);
1979 Assert(Cpu.opsize == 1);
1980 pCtx->rip += Cpu.opsize;
1981 STAM_PROFILE_STOP(&pVM->em.s.StatPrivEmu, a);
1982 return VINF_SUCCESS;
1983
1984 case OP_HLT:
1985 if (PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip))
1986 {
1987 PATMTRANSSTATE enmState;
1988 RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->eip, &enmState);
1989
1990 if (enmState == PATMTRANS_OVERWRITTEN)
1991 {
1992 rc = PATMR3DetectConflict(pVM, pOrgInstrGC, pOrgInstrGC);
1993 Assert(rc == VERR_PATCH_DISABLED);
1994 /* Conflict detected, patch disabled */
1995 Log(("emR3RawPrivileged: detected conflict -> disabled patch at %VGv\n", pCtx->eip));
1996
1997 enmState = PATMTRANS_SAFE;
1998 }
1999
2000 /* The translation had better be successful. Otherwise we can't recover. */
2001 AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %VGv\n", pCtx->eip));
2002 if (enmState != PATMTRANS_OVERWRITTEN)
2003 pCtx->eip = pOrgInstrGC;
2004 }
2005 /* no break; we could just return VINF_EM_HALT here */
2006
2007 case OP_MOV_CR:
2008 case OP_MOV_DR:
2009#ifdef LOG_ENABLED
2010 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
2011 {
2012 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
2013 DBGFR3DisasInstrCurrentLog(pVM, "Privileged instr: ");
2014 }
2015#endif
2016
2017 rc = EMInterpretInstructionCPU(pVM, &Cpu, CPUMCTX2CORE(pCtx), 0, &size);
2018 if (VBOX_SUCCESS(rc))
2019 {
2020 pCtx->rip += Cpu.opsize;
2021 STAM_PROFILE_STOP(&pVM->em.s.StatPrivEmu, a);
2022
2023 if ( Cpu.pCurInstr->opcode == OP_MOV_CR
2024 && Cpu.param1.flags == USE_REG_CR /* write */
2025 )
2026 {
2027 /* Deal with CR0 updates inside patch code that force
2028 * us to go to the recompiler.
2029 */
2030 if ( PATMIsPatchGCAddr(pVM, pCtx->rip)
2031 && (pCtx->cr0 & (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE)) != (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE))
2032 {
2033 PATMTRANSSTATE enmState;
2034 RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->rip, &enmState);
2035
2036 Assert(pCtx->eflags.Bits.u1IF == 0);
2037 Log(("Force recompiler switch due to cr0 (%VGp) update\n", pCtx->cr0));
2038 if (enmState == PATMTRANS_OVERWRITTEN)
2039 {
2040 rc = PATMR3DetectConflict(pVM, pOrgInstrGC, pOrgInstrGC);
2041 Assert(rc == VERR_PATCH_DISABLED);
2042 /* Conflict detected, patch disabled */
2043 Log(("emR3RawPrivileged: detected conflict -> disabled patch at %VGv\n", pCtx->rip));
2044 enmState = PATMTRANS_SAFE;
2045 }
2046 /* The translation had better be successful. Otherwise we can't recover. */
2047 AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %VGv\n", pCtx->rip));
2048 if (enmState != PATMTRANS_OVERWRITTEN)
2049 pCtx->rip = pOrgInstrGC;
2050 }
2051
2052 /* Reschedule is necessary as the execution/paging mode might have changed. */
2053 return VINF_EM_RESCHEDULE;
2054 }
2055 return rc; /* can return VINF_EM_HALT as well. */
2056 }
2057 AssertMsgReturn(rc == VERR_EM_INTERPRETER, ("%Vrc\n", rc), rc);
2058 break; /* fall back to the recompiler */
2059 }
2060 STAM_PROFILE_STOP(&pVM->em.s.StatPrivEmu, a);
2061 }
2062 }
2063
2064 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
2065 return emR3PatchTrap(pVM, pCtx, VINF_PATM_PATCH_TRAP_GP);
2066
2067 return emR3RawExecuteInstruction(pVM, "PRIV");
2068}
2069
2070
2071/**
2072 * Update the forced rawmode execution modifier.
2073 *
2074 * This function is called when we're returning from the raw-mode loop(s). If we're
2075 * in patch code, it will set a flag forcing execution to be resumed in raw-mode,
2076 * if not in patch code, the flag will be cleared.
2077 *
2078 * We should never interrupt patch code while it's being executed. Cli patches can
2079 * contain big code blocks, but they are always executed with IF=0. Other patches
2080 * replace single instructions and should be atomic.
2081 *
2082 * @returns Updated rc.
2083 *
2084 * @param pVM The VM handle.
2085 * @param pCtx The guest CPU context.
2086 * @param rc The result code.
2087 */
2088DECLINLINE(int) emR3RawUpdateForceFlag(PVM pVM, PCPUMCTX pCtx, int rc)
2089{
2090 if (PATMIsPatchGCAddr(pVM, pCtx->eip)) /** @todo check cs selector base/type */
2091 {
2092 /* ignore reschedule attempts. */
2093 switch (rc)
2094 {
2095 case VINF_EM_RESCHEDULE:
2096 case VINF_EM_RESCHEDULE_REM:
2097 rc = VINF_SUCCESS;
2098 break;
2099 }
2100 pVM->em.s.fForceRAW = true;
2101 }
2102 else
2103 pVM->em.s.fForceRAW = false;
2104 return rc;
2105}
2106
2107
2108/**
2109 * Process a subset of the raw-mode return code.
2110 *
2111 * Since we have to share this with raw-mode single stepping, this inline
2112 * function has been created to avoid code duplication.
2113 *
2114 * @returns VINF_SUCCESS if it's ok to continue raw mode.
2115 * @returns VBox status code to return to the EM main loop.
2116 *
2117 * @param pVM The VM handle
2118 * @param rc The return code.
2119 * @param pCtx The guest cpu context.
2120 */
2121DECLINLINE(int) emR3RawHandleRC(PVM pVM, PCPUMCTX pCtx, int rc)
2122{
2123 switch (rc)
2124 {
2125 /*
2126 * Common & simple ones.
2127 */
2128 case VINF_SUCCESS:
2129 break;
2130 case VINF_EM_RESCHEDULE_RAW:
2131 case VINF_EM_RESCHEDULE_HWACC:
2132 case VINF_EM_RAW_INTERRUPT:
2133 case VINF_EM_RAW_TO_R3:
2134 case VINF_EM_RAW_TIMER_PENDING:
2135 case VINF_EM_PENDING_REQUEST:
2136 rc = VINF_SUCCESS;
2137 break;
2138
2139 /*
2140 * Privileged instruction.
2141 */
2142 case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
2143 case VINF_PATM_PATCH_TRAP_GP:
2144 rc = emR3RawPrivileged(pVM);
2145 break;
2146
2147 /*
2148 * Got a trap which needs dispatching.
2149 */
2150 case VINF_EM_RAW_GUEST_TRAP:
2151 if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
2152 {
2153 AssertReleaseMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", CPUMGetGuestEIP(pVM)));
2154 rc = VERR_EM_RAW_PATCH_CONFLICT;
2155 break;
2156 }
2157 rc = emR3RawGuestTrap(pVM);
2158 break;
2159
2160 /*
2161 * Trap in patch code.
2162 */
2163 case VINF_PATM_PATCH_TRAP_PF:
2164 case VINF_PATM_PATCH_INT3:
2165 rc = emR3PatchTrap(pVM, pCtx, rc);
2166 break;
2167
2168 case VINF_PATM_DUPLICATE_FUNCTION:
2169 Assert(PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip));
2170 rc = PATMR3DuplicateFunctionRequest(pVM, pCtx);
2171 AssertRC(rc);
2172 rc = VINF_SUCCESS;
2173 break;
2174
2175 case VINF_PATM_CHECK_PATCH_PAGE:
2176 rc = PATMR3HandleMonitoredPage(pVM);
2177 AssertRC(rc);
2178 rc = VINF_SUCCESS;
2179 break;
2180
2181 /*
2182 * Patch manager.
2183 */
2184 case VERR_EM_RAW_PATCH_CONFLICT:
2185 AssertReleaseMsgFailed(("%Vrc handling is not yet implemented\n", rc));
2186 break;
2187
2188#ifdef VBOX_WITH_VMI
2189 /*
2190 * PARAV function.
2191 */
2192 case VINF_EM_RESCHEDULE_PARAV:
2193 rc = PARAVCallFunction(pVM);
2194 break;
2195#endif
2196
2197 /*
2198 * Memory mapped I/O access - attempt to patch the instruction
2199 */
2200 case VINF_PATM_HC_MMIO_PATCH_READ:
2201 rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
2202 PATMFL_MMIO_ACCESS | ((SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0));
2203 if (VBOX_FAILURE(rc))
2204 rc = emR3RawExecuteInstruction(pVM, "MMIO");
2205 break;
2206
2207 case VINF_PATM_HC_MMIO_PATCH_WRITE:
2208 AssertFailed(); /* not yet implemented. */
2209 rc = emR3RawExecuteInstruction(pVM, "MMIO");
2210 break;
2211
2212 /*
2213 * Conflict or out of page tables.
2214 *
2215 * VM_FF_PGM_SYNC_CR3 is set by the hypervisor and all we need to
2216 * do here is to execute the pending forced actions.
2217 */
2218 case VINF_PGM_SYNC_CR3:
2219 AssertMsg(VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL),
2220 ("VINF_PGM_SYNC_CR3 and no VM_FF_PGM_SYNC_CR3*!\n"));
2221 rc = VINF_SUCCESS;
2222 break;
2223
2224 /*
2225 * Paging mode change.
2226 */
2227 case VINF_PGM_CHANGE_MODE:
2228 rc = PGMChangeMode(pVM, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2229 if (VBOX_SUCCESS(rc))
2230 rc = VINF_EM_RESCHEDULE;
2231 break;
2232
2233 /*
2234 * CSAM wants to perform a task in ring-3. It has set an FF action flag.
2235 */
2236 case VINF_CSAM_PENDING_ACTION:
2237 rc = VINF_SUCCESS;
2238 break;
2239
2240 /*
2241 * Invoked Interrupt gate - must directly (!) go to the recompiler.
2242 */
2243 case VINF_EM_RAW_INTERRUPT_PENDING:
2244 case VINF_EM_RAW_RING_SWITCH_INT:
2245 Assert(TRPMHasTrap(pVM));
2246 Assert(!PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip));
2247
2248 if (TRPMHasTrap(pVM))
2249 {
2250 /* If the guest gate is marked unpatched, then we will check again if we can patch it. */
2251 uint8_t u8Interrupt = TRPMGetTrapNo(pVM);
2252 if (TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) == TRPM_INVALID_HANDLER)
2253 {
2254 CSAMR3CheckGates(pVM, u8Interrupt, 1);
2255 Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8Interrupt, TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) != TRPM_INVALID_HANDLER));
2256 /* Note: If it was successful, then we could go back to raw mode, but let's keep things simple for now. */
2257 }
2258 }
2259 rc = VINF_EM_RESCHEDULE_REM;
2260 break;
2261
2262 /*
2263 * Other ring switch types.
2264 */
2265 case VINF_EM_RAW_RING_SWITCH:
2266 rc = emR3RawRingSwitch(pVM);
2267 break;
2268
2269 /*
2270 * REMGCNotifyInvalidatePage() failed because of overflow.
2271 */
2272 case VERR_REM_FLUSHED_PAGES_OVERFLOW:
2273 Assert((pCtx->ss & X86_SEL_RPL) != 1);
2274 REMR3ReplayInvalidatedPages(pVM);
2275 rc = VINF_SUCCESS;
2276 break;
2277
2278 /*
2279 * I/O Port access - emulate the instruction.
2280 */
2281 case VINF_IOM_HC_IOPORT_READ:
2282 case VINF_IOM_HC_IOPORT_WRITE:
2283 rc = emR3RawExecuteIOInstruction(pVM);
2284 break;
2285
2286 /*
2287 * Memory mapped I/O access - emulate the instruction.
2288 */
2289 case VINF_IOM_HC_MMIO_READ:
2290 case VINF_IOM_HC_MMIO_WRITE:
2291 case VINF_IOM_HC_MMIO_READ_WRITE:
2292 rc = emR3RawExecuteInstruction(pVM, "MMIO");
2293 break;
2294
2295 /*
2296 * Execute instruction.
2297 */
2298 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
2299 rc = emR3RawExecuteInstruction(pVM, "LDT FAULT: ");
2300 break;
2301 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
2302 rc = emR3RawExecuteInstruction(pVM, "GDT FAULT: ");
2303 break;
2304 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
2305 rc = emR3RawExecuteInstruction(pVM, "IDT FAULT: ");
2306 break;
2307 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
2308 rc = emR3RawExecuteInstruction(pVM, "TSS FAULT: ");
2309 break;
2310 case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
2311 rc = emR3RawExecuteInstruction(pVM, "PD FAULT: ");
2312 break;
2313
2314 case VINF_EM_RAW_EMULATE_INSTR_HLT:
2315 /** @todo skip instruction and go directly to the halt state. (see REM for implementation details) */
2316 rc = emR3RawPrivileged(pVM);
2317 break;
2318
2319 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
2320 rc = emR3RawExecuteInstruction(pVM, "EMUL: ", VINF_PATM_PENDING_IRQ_AFTER_IRET);
2321 break;
2322
2323 case VINF_EM_RAW_EMULATE_INSTR:
2324 case VINF_PATCH_EMULATE_INSTR:
2325 rc = emR3RawExecuteInstruction(pVM, "EMUL: ");
2326 break;
2327
2328 /*
2329 * Stale selector and iret traps => REM.
2330 */
2331 case VINF_EM_RAW_STALE_SELECTOR:
2332 case VINF_EM_RAW_IRET_TRAP:
2333 /* We will not go to the recompiler if EIP points to patch code. */
2334 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
2335 {
2336 pCtx->eip = PATMR3PatchToGCPtr(pVM, (RTGCPTR)pCtx->eip, 0);
2337 }
2338 LogFlow(("emR3RawHandleRC: %Vrc -> %Vrc\n", rc, VINF_EM_RESCHEDULE_REM));
2339 rc = VINF_EM_RESCHEDULE_REM;
2340 break;
2341
2342 /*
2343 * Up a level.
2344 */
2345 case VINF_EM_TERMINATE:
2346 case VINF_EM_OFF:
2347 case VINF_EM_RESET:
2348 case VINF_EM_SUSPEND:
2349 case VINF_EM_HALT:
2350 case VINF_EM_RESUME:
2351 case VINF_EM_RESCHEDULE:
2352 case VINF_EM_RESCHEDULE_REM:
2353 break;
2354
2355 /*
2356 * Up a level and invoke the debugger.
2357 */
2358 case VINF_EM_DBG_STEPPED:
2359 case VINF_EM_DBG_BREAKPOINT:
2360 case VINF_EM_DBG_STEP:
2361 case VINF_EM_DBG_HYPER_ASSERTION:
2362 case VINF_EM_DBG_HYPER_BREAKPOINT:
2363 case VINF_EM_DBG_HYPER_STEPPED:
2364 case VINF_EM_DBG_STOP:
2365 break;
2366
2367 /*
2368 * Up a level, dump and debug.
2369 */
2370 case VERR_TRPM_DONT_PANIC:
2371 case VERR_TRPM_PANIC:
2372 break;
2373
2374 case VERR_VMX_INVALID_VMCS_FIELD:
2375 case VERR_VMX_INVALID_VMCS_PTR:
2376 case VERR_VMX_INVALID_VMXON_PTR:
2377 case VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE:
2378 case VERR_VMX_UNEXPECTED_EXCEPTION:
2379 case VERR_VMX_UNEXPECTED_EXIT_CODE:
2380 case VERR_VMX_INVALID_GUEST_STATE:
2381 HWACCMR3CheckError(pVM, rc);
2382 break;
2383 /*
2384 * Anything which is not known to us means an internal error
2385 * and the termination of the VM!
2386 */
2387 default:
2388 AssertMsgFailed(("Unknown GC return code: %Vra\n", rc));
2389 break;
2390 }
2391 return rc;
2392}
2393
2394
2395/**
2396 * Check for pending raw actions
2397 *
2398 * @returns VBox status code.
2399 * @param pVM The VM to operate on.
2400 */
2401VMMR3DECL(int) EMR3CheckRawForcedActions(PVM pVM)
2402{
2403 return emR3RawForcedActions(pVM, pVM->em.s.pCtx);
2404}
2405
2406
2407/**
2408 * Process raw-mode specific forced actions.
2409 *
2410 * This function is called when any FFs in the VM_FF_HIGH_PRIORITY_PRE_RAW_MASK is pending.
2411 *
2412 * @returns VBox status code.
2413 * Only the normal success/failure stuff, no VINF_EM_*.
2414 * @param pVM The VM handle.
2415 * @param pCtx The guest CPUM register context.
2416 */
2417static int emR3RawForcedActions(PVM pVM, PCPUMCTX pCtx)
2418{
2419 /*
2420 * Note that the order is *vitally* important!
2421 * Also note that SELMR3UpdateFromCPUM may trigger VM_FF_SELM_SYNC_TSS.
2422 */
2423
2424
2425 /*
2426 * Sync selector tables.
2427 */
2428 if (VM_FF_ISPENDING(pVM, VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT))
2429 {
2430 int rc = SELMR3UpdateFromCPUM(pVM);
2431 if (VBOX_FAILURE(rc))
2432 return rc;
2433 }
2434
2435 /*
2436 * Sync IDT.
2437 */
2438 if (VM_FF_ISSET(pVM, VM_FF_TRPM_SYNC_IDT))
2439 {
2440 int rc = TRPMR3SyncIDT(pVM);
2441 if (VBOX_FAILURE(rc))
2442 return rc;
2443 }
2444
2445 /*
2446 * Sync TSS.
2447 */
2448 if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_TSS))
2449 {
2450 int rc = SELMR3SyncTSS(pVM);
2451 if (VBOX_FAILURE(rc))
2452 return rc;
2453 }
2454
2455 /*
2456 * Sync page directory.
2457 */
2458 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
2459 {
2460 int rc = PGMSyncCR3(pVM, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
2461 if (VBOX_FAILURE(rc))
2462 return rc;
2463
2464 Assert(!VM_FF_ISPENDING(pVM, VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT));
2465
2466 /* Prefetch pages for EIP and ESP */
2467 /** @todo This is rather expensive. Should investigate if it really helps at all. */
2468 rc = PGMPrefetchPage(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->rip));
2469 if (rc == VINF_SUCCESS)
2470 rc = PGMPrefetchPage(pVM, SELMToFlat(pVM, DIS_SELREG_SS, CPUMCTX2CORE(pCtx), pCtx->rsp));
2471 if (rc != VINF_SUCCESS)
2472 {
2473 if (rc != VINF_PGM_SYNC_CR3)
2474 return rc;
2475 rc = PGMSyncCR3(pVM, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
2476 if (VBOX_FAILURE(rc))
2477 return rc;
2478 }
2479 /** @todo maybe prefetch the supervisor stack page as well */
2480 }
2481
2482 /*
2483 * Allocate handy pages (just in case the above actions have consumed some pages).
2484 */
2485 if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
2486 {
2487 int rc = PGMR3PhysAllocateHandyPages(pVM);
2488 if (VBOX_FAILURE(rc))
2489 return rc;
2490 }
2491
2492 return VINF_SUCCESS;
2493}
2494
2495
2496/**
2497 * Executes raw code.
2498 *
2499 * This function contains the raw-mode version of the inner
2500 * execution loop (the outer loop being in EMR3ExecuteVM()).
2501 *
2502 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
2503 * VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
2504 *
2505 * @param pVM VM handle.
2506 * @param pfFFDone Where to store an indicator telling whether or not
2507 * FFs were done before returning.
2508 */
2509static int emR3RawExecute(PVM pVM, bool *pfFFDone)
2510{
2511 STAM_REL_PROFILE_ADV_START(&pVM->em.s.StatRAWTotal, a);
2512
2513 int rc = VERR_INTERNAL_ERROR;
2514 PCPUMCTX pCtx = pVM->em.s.pCtx;
2515 LogFlow(("emR3RawExecute: (cs:eip=%04x:%08x)\n", pCtx->cs, pCtx->eip));
2516 pVM->em.s.fForceRAW = false;
2517 *pfFFDone = false;
2518
2519
2520 /*
2521 *
2522 * Spin till we get a forced action or raw mode status code resulting in
2523 * in anything but VINF_SUCCESS or VINF_EM_RESCHEDULE_RAW.
2524 *
2525 */
2526 for (;;)
2527 {
2528 STAM_PROFILE_ADV_START(&pVM->em.s.StatRAWEntry, b);
2529
2530 /*
2531 * Check various preconditions.
2532 */
2533#ifdef VBOX_STRICT
2534 Assert(REMR3QueryPendingInterrupt(pVM) == REM_NO_PENDING_IRQ);
2535 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss & X86_SEL_RPL) == 3 || (pCtx->ss & X86_SEL_RPL) == 0);
2536 AssertMsg( (pCtx->eflags.u32 & X86_EFL_IF)
2537 || PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip),
2538 ("Tried to execute code with IF at EIP=%08x!\n", pCtx->eip));
2539 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL)
2540 && PGMR3MapHasConflicts(pVM, pCtx->cr3, pVM->fRawR0Enabled))
2541 {
2542 AssertMsgFailed(("We should not get conflicts any longer!!!\n"));
2543 return VERR_INTERNAL_ERROR;
2544 }
2545#endif /* VBOX_STRICT */
2546
2547 /*
2548 * Process high priority pre-execution raw-mode FFs.
2549 */
2550 if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
2551 {
2552 rc = emR3RawForcedActions(pVM, pCtx);
2553 if (VBOX_FAILURE(rc))
2554 break;
2555 }
2556
2557 /*
2558 * If we're going to execute ring-0 code, the guest state needs to
2559 * be modified a bit and some of the state components (IF, SS/CS RPL,
2560 * and perhaps EIP) needs to be stored with PATM.
2561 */
2562 rc = CPUMRawEnter(pVM, NULL);
2563 if (rc != VINF_SUCCESS)
2564 {
2565 STAM_PROFILE_ADV_STOP(&pVM->em.s.StatRAWEntry, b);
2566 break;
2567 }
2568
2569 /*
2570 * Scan code before executing it. Don't bother with user mode or V86 code
2571 */
2572 if ( (pCtx->ss & X86_SEL_RPL) <= 1
2573 && !pCtx->eflags.Bits.u1VM
2574 && !PATMIsPatchGCAddr(pVM, pCtx->eip))
2575 {
2576 STAM_PROFILE_ADV_SUSPEND(&pVM->em.s.StatRAWEntry, b);
2577 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
2578 STAM_PROFILE_ADV_RESUME(&pVM->em.s.StatRAWEntry, b);
2579 }
2580
2581#ifdef LOG_ENABLED
2582 /*
2583 * Log important stuff before entering GC.
2584 */
2585 PPATMGCSTATE pGCState = PATMR3QueryGCStateHC(pVM);
2586 if (pCtx->eflags.Bits.u1VM)
2587 Log(("RV86: %04X:%08X IF=%d VMFlags=%x\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
2588 else if ((pCtx->ss & X86_SEL_RPL) == 1)
2589 {
2590 bool fCSAMScanned = CSAMIsPageScanned(pVM, (RTGCPTR)pCtx->eip);
2591 Log(("RR0: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss & X86_SEL_RPL), fCSAMScanned));
2592 }
2593 else if ((pCtx->ss & X86_SEL_RPL) == 3)
2594 Log(("RR3: %08X ESP=%08X IF=%d VMFlags=%x\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
2595#endif /* LOG_ENABLED */
2596
2597
2598
2599 /*
2600 * Execute the code.
2601 */
2602 STAM_PROFILE_ADV_STOP(&pVM->em.s.StatRAWEntry, b);
2603 STAM_PROFILE_START(&pVM->em.s.StatRAWExec, c);
2604 VMMR3Unlock(pVM);
2605 rc = VMMR3RawRunGC(pVM);
2606 VMMR3Lock(pVM);
2607 STAM_PROFILE_STOP(&pVM->em.s.StatRAWExec, c);
2608 STAM_PROFILE_ADV_START(&pVM->em.s.StatRAWTail, d);
2609
2610 LogFlow(("RR0-E: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss & X86_SEL_RPL)));
2611 LogFlow(("VMMR3RawRunGC returned %Vrc\n", rc));
2612
2613
2614
2615 /*
2616 * Restore the real CPU state and deal with high priority post
2617 * execution FFs before doing anything else.
2618 */
2619 rc = CPUMRawLeave(pVM, NULL, rc);
2620 VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
2621 if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK))
2622 rc = emR3HighPriorityPostForcedActions(pVM, rc);
2623
2624#ifdef VBOX_STRICT
2625 /*
2626 * Assert TSS consistency & rc vs patch code.
2627 */
2628 if ( !VM_FF_ISPENDING(pVM, VM_FF_SELM_SYNC_TSS | VM_FF_SELM_SYNC_GDT) /* GDT implies TSS at the moment. */
2629 && EMIsRawRing0Enabled(pVM))
2630 SELMR3CheckTSS(pVM);
2631 switch (rc)
2632 {
2633 case VINF_SUCCESS:
2634 case VINF_EM_RAW_INTERRUPT:
2635 case VINF_PATM_PATCH_TRAP_PF:
2636 case VINF_PATM_PATCH_TRAP_GP:
2637 case VINF_PATM_PATCH_INT3:
2638 case VINF_PATM_CHECK_PATCH_PAGE:
2639 case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
2640 case VINF_EM_RAW_GUEST_TRAP:
2641 case VINF_EM_RESCHEDULE_RAW:
2642 break;
2643
2644 default:
2645 if (PATMIsPatchGCAddr(pVM, pCtx->eip) && !(pCtx->eflags.u32 & X86_EFL_TF))
2646 LogIt(NULL, 0, LOG_GROUP_PATM, ("Patch code interrupted at %VRv for reason %Vrc\n", (RTRCPTR)CPUMGetGuestEIP(pVM), rc));
2647 break;
2648 }
2649 /*
2650 * Let's go paranoid!
2651 */
2652 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL)
2653 && PGMR3MapHasConflicts(pVM, pCtx->cr3, pVM->fRawR0Enabled))
2654 {
2655 AssertMsgFailed(("We should not get conflicts any longer!!!\n"));
2656 return VERR_INTERNAL_ERROR;
2657 }
2658#endif /* VBOX_STRICT */
2659
2660 /*
2661 * Process the returned status code.
2662 */
2663 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
2664 {
2665 STAM_PROFILE_ADV_STOP(&pVM->em.s.StatRAWTail, d);
2666 break;
2667 }
2668 rc = emR3RawHandleRC(pVM, pCtx, rc);
2669 if (rc != VINF_SUCCESS)
2670 {
2671 rc = emR3RawUpdateForceFlag(pVM, pCtx, rc);
2672 if (rc != VINF_SUCCESS)
2673 {
2674 STAM_PROFILE_ADV_STOP(&pVM->em.s.StatRAWTail, d);
2675 break;
2676 }
2677 }
2678
2679 /*
2680 * Check and execute forced actions.
2681 */
2682#ifdef VBOX_HIGH_RES_TIMERS_HACK
2683 TMTimerPoll(pVM);
2684#endif
2685 STAM_PROFILE_ADV_STOP(&pVM->em.s.StatRAWTail, d);
2686 if (VM_FF_ISPENDING(pVM, ~VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
2687 {
2688 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss & X86_SEL_RPL) != 1);
2689
2690 STAM_REL_PROFILE_ADV_SUSPEND(&pVM->em.s.StatRAWTotal, a);
2691 rc = emR3ForcedActions(pVM, rc);
2692 STAM_REL_PROFILE_ADV_RESUME(&pVM->em.s.StatRAWTotal, a);
2693 if ( rc != VINF_SUCCESS
2694 && rc != VINF_EM_RESCHEDULE_RAW)
2695 {
2696 rc = emR3RawUpdateForceFlag(pVM, pCtx, rc);
2697 if (rc != VINF_SUCCESS)
2698 {
2699 *pfFFDone = true;
2700 break;
2701 }
2702 }
2703 }
2704 }
2705
2706 /*
2707 * Return to outer loop.
2708 */
2709#if defined(LOG_ENABLED) && defined(DEBUG)
2710 RTLogFlush(NULL);
2711#endif
2712 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatRAWTotal, a);
2713 return rc;
2714}
2715
2716
2717/**
2718 * Executes hardware accelerated raw code. (Intel VMX & AMD SVM)
2719 *
2720 * This function contains the raw-mode version of the inner
2721 * execution loop (the outer loop being in EMR3ExecuteVM()).
2722 *
2723 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
2724 * VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
2725 *
2726 * @param pVM VM handle.
2727 * @param pfFFDone Where to store an indicator telling whether or not
2728 * FFs were done before returning.
2729 */
2730static int emR3HwAccExecute(PVM pVM, bool *pfFFDone)
2731{
2732 int rc = VERR_INTERNAL_ERROR;
2733 PCPUMCTX pCtx = pVM->em.s.pCtx;
2734
2735 LogFlow(("emR3HwAccExecute: (cs:eip=%04x:%VGv)\n", pCtx->cs, pCtx->rip));
2736 *pfFFDone = false;
2737
2738 STAM_COUNTER_INC(&pVM->em.s.StatHwAccExecuteEntry);
2739
2740 /*
2741 * Spin till we get a forced action which returns anything but VINF_SUCCESS.
2742 */
2743 for (;;)
2744 {
2745 STAM_PROFILE_ADV_START(&pVM->em.s.StatHwAccEntry, a);
2746
2747 /*
2748 * Check various preconditions.
2749 */
2750 VM_FF_CLEAR(pVM, (VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_TSS));
2751
2752 /*
2753 * Process high priority pre-execution raw-mode FFs.
2754 */
2755 if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
2756 {
2757 rc = emR3RawForcedActions(pVM, pCtx);
2758 if (VBOX_FAILURE(rc))
2759 break;
2760 }
2761
2762#ifdef LOG_ENABLED
2763 /*
2764 * Log important stuff before entering GC.
2765 */
2766 if (TRPMHasTrap(pVM))
2767 Log(("Pending hardware interrupt=0x%x cs:eip=%04X:%VGv\n", TRPMGetTrapNo(pVM), pCtx->cs, pCtx->rip));
2768
2769 uint32_t cpl = CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx));
2770 if (pCtx->eflags.Bits.u1VM)
2771 Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
2772 else if (CPUMIsGuestIn64BitCode(pVM, CPUMCTX2CORE(pCtx)))
2773 Log(("HWR%d: %04X:%VGv ESP=%VGv IF=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
2774 else
2775 Log(("HWR%d: %04X:%08X ESP=%08X IF=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
2776#endif /* LOG_ENABLED */
2777
2778 /*
2779 * Execute the code.
2780 */
2781 STAM_PROFILE_ADV_STOP(&pVM->em.s.StatHwAccEntry, a);
2782 STAM_PROFILE_START(&pVM->em.s.StatHwAccExec, x);
2783 VMMR3Unlock(pVM);
2784 rc = VMMR3HwAccRunGC(pVM);
2785 VMMR3Lock(pVM);
2786 STAM_PROFILE_STOP(&pVM->em.s.StatHwAccExec, x);
2787
2788 /*
2789 * Deal with high priority post execution FFs before doing anything else.
2790 */
2791 VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
2792 if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK))
2793 rc = emR3HighPriorityPostForcedActions(pVM, rc);
2794
2795 /*
2796 * Process the returned status code.
2797 */
2798 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
2799 break;
2800
2801 rc = emR3RawHandleRC(pVM, pCtx, rc);
2802 if (rc != VINF_SUCCESS)
2803 break;
2804
2805 /*
2806 * Check and execute forced actions.
2807 */
2808#ifdef VBOX_HIGH_RES_TIMERS_HACK
2809 TMTimerPoll(pVM);
2810#endif
2811 if (VM_FF_ISPENDING(pVM, VM_FF_ALL_MASK))
2812 {
2813 rc = emR3ForcedActions(pVM, rc);
2814 if ( rc != VINF_SUCCESS
2815 && rc != VINF_EM_RESCHEDULE_HWACC)
2816 {
2817 *pfFFDone = true;
2818 break;
2819 }
2820 }
2821 }
2822 /*
2823 * Return to outer loop.
2824 */
2825#if defined(LOG_ENABLED) && defined(DEBUG)
2826 RTLogFlush(NULL);
2827#endif
2828 return rc;
2829}
2830
2831
2832/**
2833 * Decides whether to execute RAW, HWACC or REM.
2834 *
2835 * @returns new EM state
2836 * @param pVM The VM.
2837 * @param pCtx The CPU context.
2838 */
2839DECLINLINE(EMSTATE) emR3Reschedule(PVM pVM, PCPUMCTX pCtx)
2840{
2841 /*
2842 * When forcing raw-mode execution, things are simple.
2843 */
2844 if (pVM->em.s.fForceRAW)
2845 return EMSTATE_RAW;
2846
2847 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
2848 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
2849 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
2850
2851 X86EFLAGS EFlags = pCtx->eflags;
2852 if (HWACCMIsEnabled(pVM))
2853 {
2854 /* Hardware accelerated raw-mode:
2855 *
2856 * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
2857 */
2858 if (HWACCMR3CanExecuteGuest(pVM, pCtx) == true)
2859 return EMSTATE_HWACC;
2860
2861 /* Note: Raw mode and hw accelerated mode are incompatible. The latter turns
2862 * off monitoring features essential for raw mode! */
2863 return EMSTATE_REM;
2864 }
2865
2866 /*
2867 * Standard raw-mode:
2868 *
2869 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
2870 * or 32 bits protected mode ring 0 code
2871 *
2872 * The tests are ordered by the likelyhood of being true during normal execution.
2873 */
2874 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
2875 {
2876 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
2877 return EMSTATE_REM;
2878 }
2879
2880#ifndef VBOX_RAW_V86
2881 if (EFlags.u32 & X86_EFL_VM) {
2882 Log2(("raw mode refused: VM_MASK\n"));
2883 return EMSTATE_REM;
2884 }
2885#endif
2886
2887 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
2888 uint32_t u32CR0 = pCtx->cr0;
2889 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
2890 {
2891 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
2892 return EMSTATE_REM;
2893 }
2894
2895 if (pCtx->cr4 & X86_CR4_PAE)
2896 {
2897 uint32_t u32Dummy, u32Features;
2898
2899 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
2900 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
2901 return EMSTATE_REM;
2902 }
2903
2904 unsigned uSS = pCtx->ss;
2905 if ( pCtx->eflags.Bits.u1VM
2906 || (uSS & X86_SEL_RPL) == 3)
2907 {
2908 if (!EMIsRawRing3Enabled(pVM))
2909 return EMSTATE_REM;
2910
2911 if (!(EFlags.u32 & X86_EFL_IF))
2912 {
2913 Log2(("raw mode refused: IF (RawR3)\n"));
2914 return EMSTATE_REM;
2915 }
2916
2917 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
2918 {
2919 Log2(("raw mode refused: CR0.WP + RawR0\n"));
2920 return EMSTATE_REM;
2921 }
2922 }
2923 else
2924 {
2925 if (!EMIsRawRing0Enabled(pVM))
2926 return EMSTATE_REM;
2927
2928 /* Only ring 0 supervisor code. */
2929 if ((uSS & X86_SEL_RPL) != 0)
2930 {
2931 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
2932 return EMSTATE_REM;
2933 }
2934
2935 // Let's start with pure 32 bits ring 0 code first
2936 /** @todo What's pure 32-bit mode? flat? */
2937 if ( !(pCtx->ssHid.Attr.n.u1DefBig)
2938 || !(pCtx->csHid.Attr.n.u1DefBig))
2939 {
2940 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
2941 return EMSTATE_REM;
2942 }
2943
2944 /* Write protection muts be turned on, or else the guest can overwrite our hypervisor code and data. */
2945 if (!(u32CR0 & X86_CR0_WP))
2946 {
2947 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
2948 return EMSTATE_REM;
2949 }
2950
2951 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
2952 {
2953 Log2(("raw r0 mode forced: patch code\n"));
2954 return EMSTATE_RAW;
2955 }
2956
2957#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
2958 if (!(EFlags.u32 & X86_EFL_IF))
2959 {
2960 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
2961 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
2962 return EMSTATE_REM;
2963 }
2964#endif
2965
2966 /** @todo still necessary??? */
2967 if (EFlags.Bits.u2IOPL != 0)
2968 {
2969 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
2970 return EMSTATE_REM;
2971 }
2972 }
2973
2974 Assert(PGMPhysIsA20Enabled(pVM));
2975 return EMSTATE_RAW;
2976}
2977
2978
2979/**
2980 * Executes all high priority post execution force actions.
2981 *
2982 * @returns rc or a fatal status code.
2983 *
2984 * @param pVM VM handle.
2985 * @param rc The current rc.
2986 */
2987static int emR3HighPriorityPostForcedActions(PVM pVM, int rc)
2988{
2989 if (VM_FF_ISSET(pVM, VM_FF_PDM_CRITSECT))
2990 PDMR3CritSectFF(pVM);
2991
2992 if (VM_FF_ISSET(pVM, VM_FF_CSAM_PENDING_ACTION))
2993 CSAMR3DoPendingAction(pVM);
2994
2995 return rc;
2996}
2997
2998
2999/**
3000 * Executes all pending forced actions.
3001 *
3002 * Forced actions can cause execution delays and execution
3003 * rescheduling. The first we deal with using action priority, so
3004 * that for instance pending timers aren't scheduled and ran until
3005 * right before execution. The rescheduling we deal with using
3006 * return codes. The same goes for VM termination, only in that case
3007 * we exit everything.
3008 *
3009 * @returns VBox status code of equal or greater importance/severity than rc.
3010 * The most important ones are: VINF_EM_RESCHEDULE,
3011 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
3012 *
3013 * @param pVM VM handle.
3014 * @param rc The current rc.
3015 *
3016 */
3017static int emR3ForcedActions(PVM pVM, int rc)
3018{
3019 STAM_REL_PROFILE_START(&pVM->em.s.StatForcedActions, a);
3020#ifdef VBOX_STRICT
3021 int rcIrq = VINF_SUCCESS;
3022#endif
3023 int rc2;
3024#define UPDATE_RC() \
3025 do { \
3026 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Vra\n", rc2)); \
3027 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
3028 break; \
3029 if (!rc || rc2 < rc) \
3030 rc = rc2; \
3031 } while (0)
3032
3033 /*
3034 * Post execution chunk first.
3035 */
3036 if (VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK))
3037 {
3038 /*
3039 * Termination request.
3040 */
3041 if (VM_FF_ISSET(pVM, VM_FF_TERMINATE))
3042 {
3043 Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
3044 STAM_REL_PROFILE_STOP(&pVM->em.s.StatForcedActions, a);
3045 return VINF_EM_TERMINATE;
3046 }
3047
3048 /*
3049 * Debugger Facility polling.
3050 */
3051 if (VM_FF_ISSET(pVM, VM_FF_DBGF))
3052 {
3053 rc2 = DBGFR3VMMForcedAction(pVM);
3054 UPDATE_RC();
3055 }
3056
3057 /*
3058 * Postponed reset request.
3059 */
3060 if (VM_FF_ISSET(pVM, VM_FF_RESET))
3061 {
3062 rc2 = VMR3Reset(pVM);
3063 UPDATE_RC();
3064 VM_FF_CLEAR(pVM, VM_FF_RESET);
3065 }
3066
3067 /*
3068 * CSAM page scanning.
3069 */
3070 if (VM_FF_ISSET(pVM, VM_FF_CSAM_SCAN_PAGE))
3071 {
3072 PCPUMCTX pCtx = pVM->em.s.pCtx;
3073
3074 /** @todo: check for 16 or 32 bits code! (D bit in the code selector) */
3075 Log(("Forced action VM_FF_CSAM_SCAN_PAGE\n"));
3076
3077 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
3078 VM_FF_CLEAR(pVM, VM_FF_CSAM_SCAN_PAGE);
3079 }
3080
3081 /* check that we got them all */
3082 Assert(!(VM_FF_NORMAL_PRIORITY_POST_MASK & ~(VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_CSAM_SCAN_PAGE)));
3083 }
3084
3085 /*
3086 * Normal priority then.
3087 * (Executed in no particular order.)
3088 */
3089 if (VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_MASK))
3090 {
3091 /*
3092 * PDM Queues are pending.
3093 */
3094 if (VM_FF_ISSET(pVM, VM_FF_PDM_QUEUES))
3095 PDMR3QueueFlushAll(pVM);
3096
3097 /*
3098 * PDM DMA transfers are pending.
3099 */
3100 if (VM_FF_ISSET(pVM, VM_FF_PDM_DMA))
3101 PDMR3DmaRun(pVM);
3102
3103 /*
3104 * Requests from other threads.
3105 */
3106 if (VM_FF_ISSET(pVM, VM_FF_REQUEST))
3107 {
3108 rc2 = VMR3ReqProcessU(pVM->pUVM);
3109 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE)
3110 {
3111 Log2(("emR3ForcedActions: returns %Vrc\n", rc2));
3112 STAM_REL_PROFILE_STOP(&pVM->em.s.StatForcedActions, a);
3113 return rc2;
3114 }
3115 UPDATE_RC();
3116 }
3117
3118 /* Replay the handler notification changes. */
3119 if (VM_FF_ISSET(pVM, VM_FF_REM_HANDLER_NOTIFY))
3120 REMR3ReplayHandlerNotifications(pVM);
3121
3122 /* check that we got them all */
3123 Assert(!(VM_FF_NORMAL_PRIORITY_MASK & ~(VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY)));
3124 }
3125
3126 /*
3127 * Execute polling function ever so often.
3128 * THIS IS A HACK, IT WILL BE *REPLACED* BY PROPER ASYNC NETWORKING "SOON"!
3129 */
3130 static unsigned cLast = 0;
3131 if (!((++cLast) % 4))
3132 PDMR3Poll(pVM);
3133
3134 /*
3135 * High priority pre execution chunk last.
3136 * (Executed in ascending priority order.)
3137 */
3138 if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK))
3139 {
3140 /*
3141 * Timers before interrupts.
3142 */
3143 if (VM_FF_ISSET(pVM, VM_FF_TIMER))
3144 TMR3TimerQueuesDo(pVM);
3145
3146 /*
3147 * The instruction following an emulated STI should *always* be executed!
3148 */
3149 if (VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
3150 {
3151 Log(("VM_FF_EMULATED_STI at %VGv successor %VGv\n", (RTGCPTR)CPUMGetGuestRIP(pVM), EMGetInhibitInterruptsPC(pVM)));
3152 if (CPUMGetGuestEIP(pVM) != EMGetInhibitInterruptsPC(pVM))
3153 {
3154 /* Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if the eip is the same as the inhibited instr address.
3155 * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
3156 * force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
3157 * break the guest. Sounds very unlikely, but such timing sensitive problem are not as rare as you might think.
3158 */
3159 VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
3160 }
3161 if (HWACCMR3IsActive(pVM))
3162 rc2 = VINF_EM_RESCHEDULE_HWACC;
3163 else
3164 rc2 = PATMAreInterruptsEnabled(pVM) ? VINF_EM_RESCHEDULE_RAW : VINF_EM_RESCHEDULE_REM;
3165
3166 UPDATE_RC();
3167 }
3168
3169 /*
3170 * Interrupts.
3171 */
3172 if ( !VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS)
3173 && (!rc || rc >= VINF_EM_RESCHEDULE_RAW)
3174 && !TRPMHasTrap(pVM) /* an interrupt could already be scheduled for dispatching in the recompiler. */
3175 && PATMAreInterruptsEnabled(pVM)
3176 && !HWACCMR3IsEventPending(pVM))
3177 {
3178 if (VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
3179 {
3180 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
3181 /** @todo this really isn't nice, should properly handle this */
3182 rc2 = TRPMR3InjectEvent(pVM, TRPM_HARDWARE_INT);
3183#ifdef VBOX_STRICT
3184 rcIrq = rc2;
3185#endif
3186 UPDATE_RC();
3187 }
3188 /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
3189 else if (REMR3QueryPendingInterrupt(pVM) != REM_NO_PENDING_IRQ)
3190 {
3191 rc2 = VINF_EM_RESCHEDULE_REM;
3192 UPDATE_RC();
3193 }
3194 }
3195
3196 /*
3197 * Allocate handy pages.
3198 */
3199 if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
3200 {
3201 rc2 = PGMR3PhysAllocateHandyPages(pVM);
3202 UPDATE_RC();
3203 }
3204
3205 /*
3206 * Debugger Facility request.
3207 */
3208 if (VM_FF_ISSET(pVM, VM_FF_DBGF))
3209 {
3210 rc2 = DBGFR3VMMForcedAction(pVM);
3211 UPDATE_RC();
3212 }
3213
3214 /*
3215 * Termination request.
3216 */
3217 if (VM_FF_ISSET(pVM, VM_FF_TERMINATE))
3218 {
3219 Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
3220 STAM_REL_PROFILE_STOP(&pVM->em.s.StatForcedActions, a);
3221 return VINF_EM_TERMINATE;
3222 }
3223
3224#ifdef DEBUG
3225 /*
3226 * Debug, pause the VM.
3227 */
3228 if (VM_FF_ISSET(pVM, VM_FF_DEBUG_SUSPEND))
3229 {
3230 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
3231 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
3232 return VINF_EM_SUSPEND;
3233 }
3234
3235#endif
3236 /* check that we got them all */
3237 Assert(!(VM_FF_HIGH_PRIORITY_PRE_MASK & ~(VM_FF_TIMER | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_DBGF | VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_INHIBIT_INTERRUPTS | VM_FF_PGM_NEED_HANDY_PAGES)));
3238 }
3239
3240#undef UPDATE_RC
3241 Log2(("emR3ForcedActions: returns %Vrc\n", rc));
3242 STAM_REL_PROFILE_STOP(&pVM->em.s.StatForcedActions, a);
3243 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
3244 return rc;
3245}
3246
3247
3248/**
3249 * Execute VM.
3250 *
3251 * This function is the main loop of the VM. The emulation thread
3252 * calls this function when the VM has been successfully constructed
3253 * and we're ready for executing the VM.
3254 *
3255 * Returning from this function means that the VM is turned off or
3256 * suspended (state already saved) and deconstruction in next in line.
3257 *
3258 * All interaction from other thread are done using forced actions
3259 * and signaling of the wait object.
3260 *
3261 * @returns VBox status code.
3262 * @param pVM The VM to operate on.
3263 */
3264VMMR3DECL(int) EMR3ExecuteVM(PVM pVM)
3265{
3266 LogFlow(("EMR3ExecuteVM: pVM=%p enmVMState=%d enmState=%d (%s) fForceRAW=%d\n", pVM, pVM->enmVMState,
3267 pVM->em.s.enmState, EMR3GetStateName(pVM->em.s.enmState), pVM->em.s.fForceRAW));
3268 VM_ASSERT_EMT(pVM);
3269 Assert(pVM->em.s.enmState == EMSTATE_NONE || pVM->em.s.enmState == EMSTATE_SUSPENDED);
3270
3271 VMMR3Lock(pVM);
3272
3273 int rc = setjmp(pVM->em.s.u.FatalLongJump);
3274 if (rc == 0)
3275 {
3276 /*
3277 * Start the virtual time.
3278 */
3279 rc = TMVirtualResume(pVM);
3280 Assert(rc == VINF_SUCCESS);
3281 rc = TMCpuTickResume(pVM);
3282 Assert(rc == VINF_SUCCESS);
3283
3284 /*
3285 * The Outer Main Loop.
3286 */
3287 bool fFFDone = false;
3288 rc = VINF_EM_RESCHEDULE;
3289 pVM->em.s.enmState = EMSTATE_REM;
3290 STAM_REL_PROFILE_ADV_START(&pVM->em.s.StatTotal, x);
3291 for (;;)
3292 {
3293 /*
3294 * Before we can schedule anything (we're here because
3295 * scheduling is required) we must service any pending
3296 * forced actions to avoid any pending action causing
3297 * immediate rescheduling upon entering an inner loop
3298 *
3299 * Do forced actions.
3300 */
3301 if ( !fFFDone
3302 && rc != VINF_EM_TERMINATE
3303 && rc != VINF_EM_OFF
3304 && VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK))
3305 {
3306 rc = emR3ForcedActions(pVM, rc);
3307 if ( ( rc == VINF_EM_RESCHEDULE_REM
3308 || rc == VINF_EM_RESCHEDULE_HWACC)
3309 && pVM->em.s.fForceRAW)
3310 rc = VINF_EM_RESCHEDULE_RAW;
3311 }
3312 else if (fFFDone)
3313 fFFDone = false;
3314
3315 /*
3316 * Now what to do?
3317 */
3318 Log2(("EMR3ExecuteVM: rc=%Vrc\n", rc));
3319 switch (rc)
3320 {
3321 /*
3322 * Keep doing what we're currently doing.
3323 */
3324 case VINF_SUCCESS:
3325 break;
3326
3327 /*
3328 * Reschedule - to raw-mode execution.
3329 */
3330 case VINF_EM_RESCHEDULE_RAW:
3331 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", pVM->em.s.enmState, EMSTATE_RAW));
3332 pVM->em.s.enmState = EMSTATE_RAW;
3333 break;
3334
3335 /*
3336 * Reschedule - to hardware accelerated raw-mode execution.
3337 */
3338 case VINF_EM_RESCHEDULE_HWACC:
3339 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HWACC: %d -> %d (EMSTATE_HWACC)\n", pVM->em.s.enmState, EMSTATE_HWACC));
3340 Assert(!pVM->em.s.fForceRAW);
3341 pVM->em.s.enmState = EMSTATE_HWACC;
3342 break;
3343
3344 /*
3345 * Reschedule - to recompiled execution.
3346 */
3347 case VINF_EM_RESCHEDULE_REM:
3348 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", pVM->em.s.enmState, EMSTATE_REM));
3349 pVM->em.s.enmState = EMSTATE_REM;
3350 break;
3351
3352#ifdef VBOX_WITH_VMI
3353 /*
3354 * Reschedule - parav call.
3355 */
3356 case VINF_EM_RESCHEDULE_PARAV:
3357 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_PARAV: %d -> %d (EMSTATE_PARAV)\n", pVM->em.s.enmState, EMSTATE_PARAV));
3358 pVM->em.s.enmState = EMSTATE_PARAV;
3359 break;
3360#endif
3361
3362 /*
3363 * Resume.
3364 */
3365 case VINF_EM_RESUME:
3366 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", pVM->em.s.enmState));
3367 /* fall through and get scheduled. */
3368
3369 /*
3370 * Reschedule.
3371 */
3372 case VINF_EM_RESCHEDULE:
3373 {
3374 EMSTATE enmState = emR3Reschedule(pVM, pVM->em.s.pCtx);
3375 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", pVM->em.s.enmState, enmState, EMR3GetStateName(enmState)));
3376 pVM->em.s.enmState = enmState;
3377 break;
3378 }
3379
3380 /*
3381 * Halted.
3382 */
3383 case VINF_EM_HALT:
3384 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", pVM->em.s.enmState, EMSTATE_HALTED));
3385 pVM->em.s.enmState = EMSTATE_HALTED;
3386 break;
3387
3388 /*
3389 * Suspend.
3390 */
3391 case VINF_EM_SUSPEND:
3392 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", pVM->em.s.enmState, EMSTATE_SUSPENDED));
3393 pVM->em.s.enmState = EMSTATE_SUSPENDED;
3394 break;
3395
3396 /*
3397 * Reset.
3398 * We might end up doing a double reset for now, we'll have to clean up the mess later.
3399 */
3400 case VINF_EM_RESET:
3401 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d\n", pVM->em.s.enmState, EMSTATE_REM));
3402 pVM->em.s.enmState = EMSTATE_REM;
3403 break;
3404
3405 /*
3406 * Power Off.
3407 */
3408 case VINF_EM_OFF:
3409 pVM->em.s.enmState = EMSTATE_TERMINATING;
3410 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", pVM->em.s.enmState, EMSTATE_TERMINATING));
3411 TMVirtualPause(pVM);
3412 TMCpuTickPause(pVM);
3413 VMMR3Unlock(pVM);
3414 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3415 return rc;
3416
3417 /*
3418 * Terminate the VM.
3419 */
3420 case VINF_EM_TERMINATE:
3421 pVM->em.s.enmState = EMSTATE_TERMINATING;
3422 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", pVM->em.s.enmState, EMSTATE_TERMINATING));
3423 TMVirtualPause(pVM);
3424 TMCpuTickPause(pVM);
3425 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3426 return rc;
3427
3428 /*
3429 * Guest debug events.
3430 */
3431 case VINF_EM_DBG_STEPPED:
3432 AssertMsgFailed(("VINF_EM_DBG_STEPPED cannot be here!"));
3433 case VINF_EM_DBG_STOP:
3434 case VINF_EM_DBG_BREAKPOINT:
3435 case VINF_EM_DBG_STEP:
3436 if (pVM->em.s.enmState == EMSTATE_RAW)
3437 {
3438 Log2(("EMR3ExecuteVM: %Vrc: %d -> %d\n", rc, pVM->em.s.enmState, EMSTATE_DEBUG_GUEST_RAW));
3439 pVM->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
3440 }
3441 else
3442 {
3443 Log2(("EMR3ExecuteVM: %Vrc: %d -> %d\n", rc, pVM->em.s.enmState, EMSTATE_DEBUG_GUEST_REM));
3444 pVM->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
3445 }
3446 break;
3447
3448 /*
3449 * Hypervisor debug events.
3450 */
3451 case VINF_EM_DBG_HYPER_STEPPED:
3452 case VINF_EM_DBG_HYPER_BREAKPOINT:
3453 case VINF_EM_DBG_HYPER_ASSERTION:
3454 Log2(("EMR3ExecuteVM: %Vrc: %d -> %d\n", rc, pVM->em.s.enmState, EMSTATE_DEBUG_HYPER));
3455 pVM->em.s.enmState = EMSTATE_DEBUG_HYPER;
3456 break;
3457
3458 /*
3459 * Any error code showing up here other than the ones we
3460 * know and process above are considered to be FATAL.
3461 *
3462 * Unknown warnings and informational status codes are also
3463 * included in this.
3464 */
3465 default:
3466 if (VBOX_SUCCESS(rc))
3467 {
3468 AssertMsgFailed(("Unexpected warning or informational status code %Vra!\n", rc));
3469 rc = VERR_EM_INTERNAL_ERROR;
3470 }
3471 pVM->em.s.enmState = EMSTATE_GURU_MEDITATION;
3472 Log(("EMR3ExecuteVM returns %d\n", rc));
3473 break;
3474 }
3475
3476
3477 /*
3478 * Any waiters can now be woken up
3479 */
3480 VMMR3Unlock(pVM);
3481 VMMR3Lock(pVM);
3482
3483 STAM_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x); /* (skip this in release) */
3484 STAM_PROFILE_ADV_START(&pVM->em.s.StatTotal, x);
3485
3486 /*
3487 * Act on the state.
3488 */
3489 switch (pVM->em.s.enmState)
3490 {
3491 /*
3492 * Execute raw.
3493 */
3494 case EMSTATE_RAW:
3495 rc = emR3RawExecute(pVM, &fFFDone);
3496 break;
3497
3498 /*
3499 * Execute hardware accelerated raw.
3500 */
3501 case EMSTATE_HWACC:
3502 rc = emR3HwAccExecute(pVM, &fFFDone);
3503 break;
3504
3505 /*
3506 * Execute recompiled.
3507 */
3508 case EMSTATE_REM:
3509 rc = emR3RemExecute(pVM, &fFFDone);
3510 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Vrc\n", rc));
3511 break;
3512
3513#ifdef VBOX_WITH_VMI
3514 /*
3515 * Execute PARAV function.
3516 */
3517 case EMSTATE_PARAV:
3518 rc = PARAVCallFunction(pVM);
3519 pVM->em.s.enmState = EMSTATE_REM;
3520 break;
3521#endif
3522
3523 /*
3524 * hlt - execution halted until interrupt.
3525 */
3526 case EMSTATE_HALTED:
3527 {
3528 STAM_REL_PROFILE_START(&pVM->em.s.StatHalted, y);
3529 rc = VMR3WaitHalted(pVM, !(CPUMGetGuestEFlags(pVM) & X86_EFL_IF));
3530 STAM_REL_PROFILE_STOP(&pVM->em.s.StatHalted, y);
3531 break;
3532 }
3533
3534 /*
3535 * Suspended - return to VM.cpp.
3536 */
3537 case EMSTATE_SUSPENDED:
3538 TMVirtualPause(pVM);
3539 TMCpuTickPause(pVM);
3540 VMMR3Unlock(pVM);
3541 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3542 return VINF_EM_SUSPEND;
3543
3544 /*
3545 * Debugging in the guest.
3546 */
3547 case EMSTATE_DEBUG_GUEST_REM:
3548 case EMSTATE_DEBUG_GUEST_RAW:
3549 TMVirtualPause(pVM);
3550 TMCpuTickPause(pVM);
3551 rc = emR3Debug(pVM, rc);
3552 TMVirtualResume(pVM);
3553 TMCpuTickResume(pVM);
3554 Log2(("EMR3ExecuteVM: enmr3Debug -> %Vrc (state %d)\n", rc, pVM->em.s.enmState));
3555 break;
3556
3557 /*
3558 * Debugging in the hypervisor.
3559 */
3560 case EMSTATE_DEBUG_HYPER:
3561 {
3562 TMVirtualPause(pVM);
3563 TMCpuTickPause(pVM);
3564 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3565
3566 rc = emR3Debug(pVM, rc);
3567 Log2(("EMR3ExecuteVM: enmr3Debug -> %Vrc (state %d)\n", rc, pVM->em.s.enmState));
3568 if (rc != VINF_SUCCESS)
3569 {
3570 /* switch to guru meditation mode */
3571 pVM->em.s.enmState = EMSTATE_GURU_MEDITATION;
3572 VMMR3FatalDump(pVM, rc);
3573 return rc;
3574 }
3575
3576 STAM_REL_PROFILE_ADV_START(&pVM->em.s.StatTotal, x);
3577 TMVirtualResume(pVM);
3578 TMCpuTickResume(pVM);
3579 break;
3580 }
3581
3582 /*
3583 * Guru meditation takes place in the debugger.
3584 */
3585 case EMSTATE_GURU_MEDITATION:
3586 {
3587 TMVirtualPause(pVM);
3588 TMCpuTickPause(pVM);
3589 VMMR3FatalDump(pVM, rc);
3590 emR3Debug(pVM, rc);
3591 VMMR3Unlock(pVM);
3592 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3593 return rc;
3594 }
3595
3596 /*
3597 * The states we don't expect here.
3598 */
3599 case EMSTATE_NONE:
3600 case EMSTATE_TERMINATING:
3601 default:
3602 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVM->em.s.enmState));
3603 pVM->em.s.enmState = EMSTATE_GURU_MEDITATION;
3604 TMVirtualPause(pVM);
3605 TMCpuTickPause(pVM);
3606 VMMR3Unlock(pVM);
3607 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3608 return VERR_EM_INTERNAL_ERROR;
3609 }
3610 } /* The Outer Main Loop */
3611 }
3612 else
3613 {
3614 /*
3615 * Fatal error.
3616 */
3617 LogFlow(("EMR3ExecuteVM: returns %Vrc (longjmp / fatal error)\n", rc));
3618 TMVirtualPause(pVM);
3619 TMCpuTickPause(pVM);
3620 VMMR3FatalDump(pVM, rc);
3621 emR3Debug(pVM, rc);
3622 VMMR3Unlock(pVM);
3623 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3624 /** @todo change the VM state! */
3625 return rc;
3626 }
3627
3628 /* (won't ever get here). */
3629 AssertFailed();
3630}
3631
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette