VirtualBox

source: vbox/trunk/src/VBox/VMM/EM.cpp@29303

Last change on this file since 29303 was 29250, checked in by vboxsync, 15 years ago

iprt/asm*.h: split out asm-math.h, don't include asm-*.h from asm.h, don't include asm.h from sup.h. Fixed a couple file headers.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 98.3 KB
1/* $Id: EM.cpp 29250 2010-05-09 17:53:58Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HwAccExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hwaccm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
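/*
 * Illustration (not part of the build): a minimal sketch of the outer-loop
 * idea described above. emR3Reschedule, emR3RemExecute and emR3ForcedActions
 * are defined in this file; emR3RawExecute and emR3HwAccExecute are
 * implemented in the raw-mode and HWACCM counterparts, and the real
 * EMR3ExecuteVM() additionally handles the halted, suspended, debug and
 * guru-meditation states.
 *
 * @code
 *  int rc = VINF_SUCCESS;
 *  for (;;)
 *  {
 *      EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
 *      bool    fFFDone  = false;
 *      switch (enmState)
 *      {
 *          case EMSTATE_RAW:   rc = emR3RawExecute(pVM, pVCpu, &fFFDone);   break;
 *          case EMSTATE_HWACC: rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone); break;
 *          case EMSTATE_REM:   rc = emR3RemExecute(pVM, pVCpu, &fFFDone);   break;
 *          default:            return rc; // halted, suspended, guru, ...
 *      }
 *      if (!fFFDone)
 *          rc = emR3ForcedActions(pVM, pVCpu, rc); // timers, requests, ...
 *  }
 * @endcode
 */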
33
34/*******************************************************************************
35* Header Files *
36*******************************************************************************/
37#define LOG_GROUP LOG_GROUP_EM
38#include <VBox/em.h>
39#include <VBox/vmm.h>
40#ifdef VBOX_WITH_VMI
41# include <VBox/parav.h>
42#endif
43#include <VBox/patm.h>
44#include <VBox/csam.h>
45#include <VBox/selm.h>
46#include <VBox/trpm.h>
47#include <VBox/iom.h>
48#include <VBox/dbgf.h>
49#include <VBox/pgm.h>
50#include <VBox/rem.h>
51#include <VBox/tm.h>
52#include <VBox/mm.h>
53#include <VBox/ssm.h>
54#include <VBox/pdmapi.h>
55#include <VBox/pdmcritsect.h>
56#include <VBox/pdmqueue.h>
57#include <VBox/hwaccm.h>
58#include <VBox/patm.h>
59#include "EMInternal.h"
60#include <VBox/vm.h>
61#include <VBox/cpumdis.h>
62#include <VBox/dis.h>
63#include <VBox/disopcode.h>
64#include <VBox/dbgf.h>
65
66#include <iprt/asm.h>
67#include <iprt/string.h>
68#include <iprt/stream.h>
69
70
71/*******************************************************************************
72* Defined Constants And Macros *
73*******************************************************************************/
74#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
75#define EM_NOTIFY_HWACCM
76#endif
77
78
79/*******************************************************************************
80* Internal Functions *
81*******************************************************************************/
82static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
83static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
84static const char *emR3GetStateName(EMSTATE enmState);
85static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc);
86static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
87static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
88int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
89
90
91/**
92 * Initializes the EM.
93 *
94 * @returns VBox status code.
95 * @param pVM The VM to operate on.
96 */
97VMMR3DECL(int) EMR3Init(PVM pVM)
98{
99 LogFlow(("EMR3Init\n"));
100 /*
101 * Assert alignment and sizes.
102 */
103 AssertCompileMemberAlignment(VM, em.s, 32);
104 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
105 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
106 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
107
108 /*
109 * Init the structure.
110 */
111 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
112 int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR3Enabled", &pVM->fRawR3Enabled);
113 if (RT_FAILURE(rc))
114 pVM->fRawR3Enabled = true;
115 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR0Enabled", &pVM->fRawR0Enabled);
116 if (RT_FAILURE(rc))
117 pVM->fRawR0Enabled = true;
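 /* Note: CFGMR3QueryBool() fails when the key is absent, so the two blocks
    above amount to "default to true unless the configuration explicitly says
    otherwise"; a genuine read error is treated the same as a missing key. */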
118 Log(("EMR3Init: fRawR3Enabled=%d fRawR0Enabled=%d\n", pVM->fRawR3Enabled, pVM->fRawR0Enabled));
119
120 /*
121 * Initialize the REM critical section.
122 */
123 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
124 AssertRCReturn(rc, rc);
125
126 /*
127 * Saved state.
128 */
129 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
130 NULL, NULL, NULL,
131 NULL, emR3Save, NULL,
132 NULL, emR3Load, NULL);
133 if (RT_FAILURE(rc))
134 return rc;
135
136 for (VMCPUID i = 0; i < pVM->cCpus; i++)
137 {
138 PVMCPU pVCpu = &pVM->aCpus[i];
139
140 pVCpu->em.s.offVMCPU = RT_OFFSETOF(VMCPU, em.s);
141
142 pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
143 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
144 pVCpu->em.s.fForceRAW = false;
145
146 pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
147 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
148 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
149
150# define EM_REG_COUNTER(a, b, c) \
151 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
152 AssertRC(rc);
153
154# define EM_REG_COUNTER_USED(a, b, c) \
155 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
156 AssertRC(rc);
157
158# define EM_REG_PROFILE(a, b, c) \
159 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
160 AssertRC(rc);
161
162# define EM_REG_PROFILE_ADV(a, b, c) \
163 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
164 AssertRC(rc);
165
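/* For illustration: with the macros above, a registration line such as
 *     EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "desc");
 * expands (roughly) to
 *     rc = STAMR3RegisterF(pVM, &pStats->StatRZInterpretFailed, STAMTYPE_COUNTER,
 *                          STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
 *                          "desc", "/EM/CPU%d/RZ/Interpret/Failed", i);
 *     AssertRC(rc);
 * with the per-VCPU loop index 'i' substituted for the %d in the name. */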
166 /*
167 * Statistics.
168 */
169#ifdef VBOX_WITH_STATISTICS
170 PEMSTATS pStats;
171 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
172 if (RT_FAILURE(rc))
173 return rc;
174
175 pVCpu->em.s.pStatsR3 = pStats;
176 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
177 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
178
179 EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
180 EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");
181
182 EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
183 EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");
184
185 EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
186 EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
187 EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
188 EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
189 EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
190 EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
191 EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
192 EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
193 EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
194 EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
195 EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
196 EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
197 EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
198 EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
199 EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
200 EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
201 EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
202 EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
203 EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
204 EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
205 EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
206 EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
207 EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
208 EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
209 EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
210 EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
211 EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
212 EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
213 EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
214 EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
215 EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
216 EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
217 EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
218 EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
219 EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
220 EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
221 EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
222 EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
223 EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
224 EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
225 EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
226 EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
227 EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
228 EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
229 EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
230 EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
231 EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
232 EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
233 EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
234 EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
235 EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
236 EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
237 EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
238 EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
239 EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
240 EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
241 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
242 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
243 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
244 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
245 EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
246 EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
247 EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
248 EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
249 EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
250 EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
251 EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
252 EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
253 EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
254 EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
255 EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
256 EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
257
258 EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
259 EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");
260
261 EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
262 EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
263 EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
264 EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
265 EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
266 EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
267 EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
268 EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
269 EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
270 EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
271 EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
272 EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
273 EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
274 EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
275 EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
276 EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
277 EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
278 EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
279 EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
280 EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
281 EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
282 EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
283 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
284 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
285 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
286 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
287 EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
288 EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
289 EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
290 EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
291 EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
292 EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
293 EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
294 EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
295 EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
296 EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
297 EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
298 EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
299 EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
300 EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
301 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
302 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
303 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
304 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
305 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
306 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
307 EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
308 EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
309 EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
310 EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
311
312 EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
313 EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
314 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
315 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
316 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
317 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
318 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
319 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
320 EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
321 EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
322 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
323 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
324 EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
325 EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
326 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
327 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
328 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
329 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
330 EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
331 EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
332 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
333 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
334 EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
335 EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
336 EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
337 EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
338 EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
339 EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
340
341 EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
342 EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
343 EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of prefix.");
344 EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of prefix.");
345
346 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
347 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
348 EM_REG_COUNTER_USED(&pStats->StatIn, "/EM/CPU%d/R3/PrivInst/In", "Number of in instructions.");
349 EM_REG_COUNTER_USED(&pStats->StatOut, "/EM/CPU%d/R3/PrivInst/Out", "Number of out instructions.");
350 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "Number of restarted i/o instructions.");
351 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
352 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
353 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
354 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
355 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
356 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
357 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
358 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
359 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
360 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
361 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
362 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
363 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
364 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
365 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
366 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
367 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
368 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
369 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
370 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
371 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
372 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
373
374 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
375 pVCpu->em.s.pCliStatTree = 0;
376
377 /* these should be considered for release statistics. */
378 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
379 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
380 EM_REG_PROFILE(&pVCpu->em.s.StatHwAccEntry, "/PROF/CPU%d/EM/HwAccEnter", "Profiling Hardware Accelerated Mode entry overhead.");
381 EM_REG_PROFILE(&pVCpu->em.s.StatHwAccExec, "/PROF/CPU%d/EM/HwAccExec", "Profiling Hardware Accelerated Mode execution.");
382 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
383 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
384 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
385 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
386 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
387 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
388
389#endif /* VBOX_WITH_STATISTICS */
390
391 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
392 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
393 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
394 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
395
396 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
397 }
398
399 return VINF_SUCCESS;
400}
401
402
403/**
404 * Initializes the per-VCPU EM.
405 *
406 * @returns VBox status code.
407 * @param pVM The VM to operate on.
408 */
409VMMR3DECL(int) EMR3InitCPU(PVM pVM)
410{
411 LogFlow(("EMR3InitCPU\n"));
412 return VINF_SUCCESS;
413}
414
415
416/**
417 * Applies relocations to data and code managed by this
418 * component. This function will be called at init and
419 * whenever the VMM needs to relocate itself inside the GC.
420 *
421 * @param pVM The VM.
422 */
423VMMR3DECL(void) EMR3Relocate(PVM pVM)
424{
425 LogFlow(("EMR3Relocate\n"));
426 for (VMCPUID i = 0; i < pVM->cCpus; i++)
427 {
428 PVMCPU pVCpu = &pVM->aCpus[i];
429 if (pVCpu->em.s.pStatsR3)
430 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
431 }
432}
433
434
435/**
436 * Reset the EM state for a CPU.
437 *
438 * Called by EMR3Reset and hot plugging.
439 *
440 * @param pVCpu The virtual CPU.
441 */
442VMMR3DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
443{
444 pVCpu->em.s.fForceRAW = false;
445
446 /* VMR3Reset may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
447 out of the HALTED state here so that enmPrevState doesn't end up as
448 HALTED when EMR3Execute returns. */
449 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
450 {
451 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
452 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
453 }
454}
455
456
457/**
458 * Reset notification.
459 *
460 * @param pVM The VM handle.
461 */
462VMMR3DECL(void) EMR3Reset(PVM pVM)
463{
464 Log(("EMR3Reset: \n"));
465 for (VMCPUID i = 0; i < pVM->cCpus; i++)
466 EMR3ResetCpu(&pVM->aCpus[i]);
467}
468
469
470/**
471 * Terminates the EM.
472 *
473 * Termination means cleaning up and freeing all resources;
474 * the VM itself is at this point powered off or suspended.
475 *
476 * @returns VBox status code.
477 * @param pVM The VM to operate on.
478 */
479VMMR3DECL(int) EMR3Term(PVM pVM)
480{
481 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
482
483 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
484 return VINF_SUCCESS;
485}
486
487/**
488 * Terminates the per-VCPU EM.
489 *
490 * Termination means cleaning up and freeing all resources;
491 * the VM itself is at this point powered off or suspended.
492 *
493 * @returns VBox status code.
494 * @param pVM The VM to operate on.
495 */
496VMMR3DECL(int) EMR3TermCPU(PVM pVM)
497{
498 return VINF_SUCCESS;
499}
500
501/**
502 * Execute state save operation.
503 *
504 * @returns VBox status code.
505 * @param pVM VM Handle.
506 * @param pSSM SSM operation handle.
507 */
508static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
509{
510 for (VMCPUID i = 0; i < pVM->cCpus; i++)
511 {
512 PVMCPU pVCpu = &pVM->aCpus[i];
513
514 int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
515 AssertRCReturn(rc, rc);
516
517 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
518 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
519 rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
520 AssertRCReturn(rc, rc);
521
522 /* Save mwait state. */
523 rc = SSMR3PutU32(pSSM, pVCpu->em.s.mwait.fWait);
524 AssertRCReturn(rc, rc);
525 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMWaitEAX);
526 AssertRCReturn(rc, rc);
527 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMWaitECX);
528 AssertRCReturn(rc, rc);
529 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMonitorEAX);
530 AssertRCReturn(rc, rc);
531 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMonitorECX);
532 AssertRCReturn(rc, rc);
533 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMonitorEDX);
534 AssertRCReturn(rc, rc);
535 }
536 return VINF_SUCCESS;
537}
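/* Note: emR3Load below must read these fields back in exactly this order and
   with the matching SSMR3Get* widths (Bool, U32, then the five GCPtr mwait
   fields); any mismatch would desynchronize the saved-state stream. */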
538
539
540/**
541 * Execute state load operation.
542 *
543 * @returns VBox status code.
544 * @param pVM VM Handle.
545 * @param pSSM SSM operation handle.
546 * @param uVersion Data layout version.
547 * @param uPass The data pass.
548 */
549static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
550{
551 /*
552 * Validate version.
553 */
554 if ( uVersion != EM_SAVED_STATE_VERSION
555 && uVersion != EM_SAVED_STATE_VERSION_PRE_MWAIT
556 && uVersion != EM_SAVED_STATE_VERSION_PRE_SMP)
557 {
558 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
559 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
560 }
561 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
562
563 /*
564 * Load the saved state.
565 */
566 for (VMCPUID i = 0; i < pVM->cCpus; i++)
567 {
568 PVMCPU pVCpu = &pVM->aCpus[i];
569
570 int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
571 if (RT_FAILURE(rc))
572 pVCpu->em.s.fForceRAW = false;
573 AssertRCReturn(rc, rc);
574
575 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
576 {
577 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
578 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
579 AssertRCReturn(rc, rc);
580 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
581
582 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
583 }
584 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
585 {
586 /* Load mwait state. */
587 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.mwait.fWait);
588 AssertRCReturn(rc, rc);
589 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMWaitEAX);
590 AssertRCReturn(rc, rc);
591 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMWaitECX);
592 AssertRCReturn(rc, rc);
593 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMonitorEAX);
594 AssertRCReturn(rc, rc);
595 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMonitorECX);
596 AssertRCReturn(rc, rc);
597 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMonitorEDX);
598 AssertRCReturn(rc, rc);
599 }
600
601 Assert(!pVCpu->em.s.pCliStatTree);
602 }
603 return VINF_SUCCESS;
604}
605
606
607/**
608 * Raise a fatal error.
609 *
610 * Safely terminate the VM with a full state report. This function
611 * will naturally never return.
612 *
613 * @param pVCpu VMCPU handle.
614 * @param rc VBox status code.
615 */
616VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
617{
618 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
619 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
620 AssertReleaseMsgFailed(("longjmp returned!\n"));
621}
622
623
624/**
625 * Gets the EM state name.
626 *
627 * @returns pointer to read-only state name.
628 * @param enmState The state.
629 */
630static const char *emR3GetStateName(EMSTATE enmState)
631{
632 switch (enmState)
633 {
634 case EMSTATE_NONE: return "EMSTATE_NONE";
635 case EMSTATE_RAW: return "EMSTATE_RAW";
636 case EMSTATE_HWACC: return "EMSTATE_HWACC";
637 case EMSTATE_REM: return "EMSTATE_REM";
638 case EMSTATE_PARAV: return "EMSTATE_PARAV";
639 case EMSTATE_HALTED: return "EMSTATE_HALTED";
640 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
641 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
642 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
643 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
644 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
645 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
646 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
647 default: return "Unknown!";
648 }
649}
650
651
652/**
653 * Debug loop.
654 *
655 * @returns VBox status code for EM.
656 * @param pVM VM handle.
657 * @param pVCpu VMCPU handle.
658 * @param rc Current EM VBox status code.
659 */
660static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc)
661{
662 for (;;)
663 {
664 Log(("emR3Debug: rc=%Rrc\n", rc));
665 const int rcLast = rc;
666
667 /*
668 * Debug related RC.
669 */
670 switch (rc)
671 {
672 /*
673 * Single step an instruction.
674 */
675 case VINF_EM_DBG_STEP:
676 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
677 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
678 || pVCpu->em.s.fForceRAW /* paranoia */)
679 rc = emR3RawStep(pVM, pVCpu);
680 else
681 {
682 Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
683 rc = emR3RemStep(pVM, pVCpu);
684 }
685 break;
686
687 /*
688 * Simple events: stepped, breakpoint, stop/assertion.
689 */
690 case VINF_EM_DBG_STEPPED:
691 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
692 break;
693
694 case VINF_EM_DBG_BREAKPOINT:
695 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
696 break;
697
698 case VINF_EM_DBG_STOP:
699 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
700 break;
701
702 case VINF_EM_DBG_HYPER_STEPPED:
703 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
704 break;
705
706 case VINF_EM_DBG_HYPER_BREAKPOINT:
707 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
708 break;
709
710 case VINF_EM_DBG_HYPER_ASSERTION:
711 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
712 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
713 break;
714
715 /*
716 * Guru meditation.
717 */
718 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
719 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
720 break;
721 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
722 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
723 break;
724
725 default: /** @todo don't use default for guru, but make special errors code! */
726 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
727 break;
728 }
729
730 /*
731 * Process the result.
732 */
733 do
734 {
735 switch (rc)
736 {
737 /*
738 * Continue the debugging loop.
739 */
740 case VINF_EM_DBG_STEP:
741 case VINF_EM_DBG_STOP:
742 case VINF_EM_DBG_STEPPED:
743 case VINF_EM_DBG_BREAKPOINT:
744 case VINF_EM_DBG_HYPER_STEPPED:
745 case VINF_EM_DBG_HYPER_BREAKPOINT:
746 case VINF_EM_DBG_HYPER_ASSERTION:
747 break;
748
749 /*
750 * Resuming execution (in some form) has to be done here if we got
751 * a hypervisor debug event.
752 */
753 case VINF_SUCCESS:
754 case VINF_EM_RESUME:
755 case VINF_EM_SUSPEND:
756 case VINF_EM_RESCHEDULE:
757 case VINF_EM_RESCHEDULE_RAW:
758 case VINF_EM_RESCHEDULE_REM:
759 case VINF_EM_HALT:
760 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
761 {
762 rc = emR3RawResumeHyper(pVM, pVCpu);
763 if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
764 continue;
765 }
766 if (rc == VINF_SUCCESS)
767 rc = VINF_EM_RESCHEDULE;
768 return rc;
769
770 /*
771 * The debugger isn't attached.
772 * We'll simply turn the thing off since that's the easiest thing to do.
773 */
774 case VERR_DBGF_NOT_ATTACHED:
775 switch (rcLast)
776 {
777 case VINF_EM_DBG_HYPER_STEPPED:
778 case VINF_EM_DBG_HYPER_BREAKPOINT:
779 case VINF_EM_DBG_HYPER_ASSERTION:
780 case VERR_TRPM_PANIC:
781 case VERR_TRPM_DONT_PANIC:
782 case VERR_VMM_RING0_ASSERTION:
783 case VERR_VMM_HYPER_CR3_MISMATCH:
784 case VERR_VMM_RING3_CALL_DISABLED:
785 return rcLast;
786 }
787 return VINF_EM_OFF;
788
789 /*
790 * Status codes terminating the VM in one or another sense.
791 */
792 case VINF_EM_TERMINATE:
793 case VINF_EM_OFF:
794 case VINF_EM_RESET:
795 case VINF_EM_NO_MEMORY:
796 case VINF_EM_RAW_STALE_SELECTOR:
797 case VINF_EM_RAW_IRET_TRAP:
798 case VERR_TRPM_PANIC:
799 case VERR_TRPM_DONT_PANIC:
800 case VERR_VMM_RING0_ASSERTION:
801 case VERR_VMM_HYPER_CR3_MISMATCH:
802 case VERR_VMM_RING3_CALL_DISABLED:
803 case VERR_INTERNAL_ERROR:
804 case VERR_INTERNAL_ERROR_2:
805 case VERR_INTERNAL_ERROR_3:
806 case VERR_INTERNAL_ERROR_4:
807 case VERR_INTERNAL_ERROR_5:
808 case VERR_IPE_UNEXPECTED_STATUS:
809 case VERR_IPE_UNEXPECTED_INFO_STATUS:
810 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
811 return rc;
812
813 /*
814 * The rest is unexpected, and will keep us here.
815 */
816 default:
817 AssertMsgFailed(("Unexpected rc %Rrc!\n", rc));
818 break;
819 }
820 } while (false);
821 } /* debug forever */
822}
823
824/**
825 * Steps recompiled code.
826 *
827 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
828 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
829 *
830 * @param pVM VM handle.
831 * @param pVCpu VMCPU handle.
832 */
833static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
834{
835 LogFlow(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
836
837 EMRemLock(pVM);
838
839 /*
840 * Switch to REM, step instruction, switch back.
841 */
842 int rc = REMR3State(pVM, pVCpu);
843 if (RT_SUCCESS(rc))
844 {
845 rc = REMR3Step(pVM, pVCpu);
846 REMR3StateBack(pVM, pVCpu);
847 }
848 EMRemUnlock(pVM);
849
850 LogFlow(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
851 return rc;
852}
853
854
855/**
856 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
857 * critical section.
858 *
859 * @returns false - new fInREMState value.
860 * @param pVM The VM handle.
861 * @param pVCpu The virtual CPU handle.
862 */
863DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
864{
865 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
866 REMR3StateBack(pVM, pVCpu);
867 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
868
869 EMRemUnlock(pVM);
870 return false;
871}
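/* Usage note: the hardcoded 'false' return value lets callers combine the
   sync-back with clearing their own tracking flag in a single statement:
       fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
   which is exactly how emR3RemExecute below uses it. */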
872
873
874/**
875 * Executes recompiled code.
876 *
877 * This function contains the recompiler version of the inner
878 * execution loop (the outer loop being in EMR3ExecuteVM()).
879 *
880 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
881 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
882 *
883 * @param pVM VM handle.
884 * @param pVCpu VMCPU handle.
885 * @param pfFFDone Where to store an indicator telling whether or not
886 * FFs were done before returning.
887 *
888 */
889static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
890{
891#ifdef LOG_ENABLED
892 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
893 uint32_t cpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));
894
895 if (pCtx->eflags.Bits.u1VM)
896 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF));
897 else
898 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0));
899#endif
900 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
901
902#if defined(VBOX_STRICT) && defined(DEBUG_bird)
903 AssertMsg( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
904 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo #1419 - get flat address. */
905 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
906#endif
907
908 /*
909 * Spin until we get a forced action which returns anything but VINF_SUCCESS,
910 * or until the REM suggests raw-mode execution.
911 */
912 *pfFFDone = false;
913 bool fInREMState = false;
914 int rc = VINF_SUCCESS;
915 for (;;)
916 {
917 /*
918 * Lock REM and update the state if not already in sync.
919 *
920 * Note! Big lock, but you are not supposed to own any lock when
921 * coming in here.
922 */
923 if (!fInREMState)
924 {
925 EMRemLock(pVM);
926 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
927
928 /* Flush the recompiler translation blocks if the VCPU has changed,
929 also force a full CPU state resync. */
930 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
931 {
932 REMFlushTBs(pVM);
933 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
934 }
935 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
936
937 rc = REMR3State(pVM, pVCpu);
938
939 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
940 if (RT_FAILURE(rc))
941 break;
942 fInREMState = true;
943
944 /*
945 * We might have missed the raising of VMREQ, TIMER and some other
946 * important FFs while we were busy switching the state. So, check again.
947 */
948 if ( VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_RESET)
949 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
950 {
951 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
952 goto l_REMDoForcedActions;
953 }
954 }
955
956
957 /*
958 * Execute REM.
959 */
960 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
961 rc = REMR3Run(pVM, pVCpu);
962 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
963
964
965 /*
966 * Deal with high priority post execution FFs before doing anything
967 * else. Sync back the state and leave the lock to be on the safe side.
968 */
969 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
970 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
971 {
972 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
973 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
974 }
975
976 /*
977 * Process the returned status code.
978 */
979 if (rc != VINF_SUCCESS)
980 {
981 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
982 break;
983 if (rc != VINF_REM_INTERRUPED_FF)
984 {
985 /*
986 * Anything which is not known to us means an internal error
987 * and the termination of the VM!
988 */
989 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
990 break;
991 }
992 }
993
994
995 /*
996 * Check and execute forced actions.
997 *
998 * Sync back the VM state and leave the lock before calling any of
999 * these; you never know what's going to happen here.
1000 */
1001#ifdef VBOX_HIGH_RES_TIMERS_HACK
1002 TMTimerPollVoid(pVM, pVCpu);
1003#endif
1004 AssertCompile((VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)) & VMCPU_FF_TIMER);
1005 if ( VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK)
1006 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)))
1007 {
1008l_REMDoForcedActions:
1009 if (fInREMState)
1010 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1011 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1012 rc = emR3ForcedActions(pVM, pVCpu, rc);
1013 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1014 if ( rc != VINF_SUCCESS
1015 && rc != VINF_EM_RESCHEDULE_REM)
1016 {
1017 *pfFFDone = true;
1018 break;
1019 }
1020 }
1021
1022 } /* The Inner Loop, recompiled execution mode version. */
1023
1024
1025 /*
1026 * Returning. Sync back the VM state if required.
1027 */
1028 if (fInREMState)
1029 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1030
1031 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1032 return rc;
1033}
1034
1035
1036#ifdef DEBUG
1037
1038int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1039{
1040 EMSTATE enmOldState = pVCpu->em.s.enmState;
1041
1042 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1043
1044 Log(("Single step BEGIN:\n"));
1045 for (uint32_t i = 0; i < cIterations; i++)
1046 {
1047 DBGFR3PrgStep(pVCpu);
1048 DBGFR3DisasInstrCurrentLog(pVCpu, "RSS: ");
1049 emR3RemStep(pVM, pVCpu);
1050 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1051 break;
1052 }
1053 Log(("Single step END:\n"));
1054 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1055 pVCpu->em.s.enmState = enmOldState;
1056 return VINF_EM_RESCHEDULE;
1057}
1058
1059#endif /* DEBUG */
1060
1061
1062/**
1063 * Decides whether to execute RAW, HWACC or REM.
1064 *
1065 * @returns new EM state
1066 * @param pVM The VM.
1067 * @param pVCpu The VMCPU handle.
1068 * @param pCtx The CPU context.
1069 */
1070EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1071{
1072 /*
1073 * When forcing raw-mode execution, things are simple.
1074 */
1075 if (pVCpu->em.s.fForceRAW)
1076 return EMSTATE_RAW;
1077
1078 /*
1079 * We stay in the wait for SIPI state unless explicitly told otherwise.
1080 */
1081 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1082 return EMSTATE_WAIT_SIPI;
1083
1084 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1085 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1086 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1087
1088 X86EFLAGS EFlags = pCtx->eflags;
1089 if (HWACCMIsEnabled(pVM))
1090 {
1091 /* Hardware accelerated raw-mode:
1092 *
1093 * Typically only 32-bit protected-mode code, with paging enabled, is allowed here.
1094 */
1095 if (HWACCMR3CanExecuteGuest(pVM, pCtx))
1096 return EMSTATE_HWACC;
1097
1098 /* Note: Raw mode and hw accelerated mode are incompatible. The latter turns
1099 * off monitoring features essential for raw mode! */
1100 return EMSTATE_REM;
1101 }
1102
1103 /*
1104 * Standard raw-mode:
1105 *
1106 * Here we only support 16 & 32-bit protected mode ring-3 code that has no I/O privileges,
1107 * or 32-bit protected mode ring-0 code.
1108 *
1109 * The tests are ordered by the likelihood of being true during normal execution.
1110 */
1111 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1112 {
1113 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1114 return EMSTATE_REM;
1115 }
1116
1117#ifndef VBOX_RAW_V86
1118 if (EFlags.u32 & X86_EFL_VM) {
1119 Log2(("raw mode refused: VM_MASK\n"));
1120 return EMSTATE_REM;
1121 }
1122#endif
1123
1124 /** @todo check the X86_CR0_AM flag with respect to raw mode!!! We're probably not emulating it right! */
1125 uint32_t u32CR0 = pCtx->cr0;
1126 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1127 {
1128 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1129 return EMSTATE_REM;
1130 }
1131
1132 if (pCtx->cr4 & X86_CR4_PAE)
1133 {
1134 uint32_t u32Dummy, u32Features;
1135
1136 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1137 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1138 return EMSTATE_REM;
1139 }
1140
1141 unsigned uSS = pCtx->ss;
1142 if ( pCtx->eflags.Bits.u1VM
1143 || (uSS & X86_SEL_RPL) == 3)
1144 {
1145 if (!EMIsRawRing3Enabled(pVM))
1146 return EMSTATE_REM;
1147
1148 if (!(EFlags.u32 & X86_EFL_IF))
1149 {
1150 Log2(("raw mode refused: IF (RawR3)\n"));
1151 return EMSTATE_REM;
1152 }
1153
1154 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1155 {
1156 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1157 return EMSTATE_REM;
1158 }
1159 }
1160 else
1161 {
1162 if (!EMIsRawRing0Enabled(pVM))
1163 return EMSTATE_REM;
1164
1165 /* Only ring 0 supervisor code. */
1166 if ((uSS & X86_SEL_RPL) != 0)
1167 {
1168 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1169 return EMSTATE_REM;
1170 }
1171
1172        // Let's start with pure 32-bit ring 0 code first
1173 /** @todo What's pure 32-bit mode? flat? */
1174 if ( !(pCtx->ssHid.Attr.n.u1DefBig)
1175 || !(pCtx->csHid.Attr.n.u1DefBig))
1176 {
1177 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1178 return EMSTATE_REM;
1179 }
1180
1181 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1182 if (!(u32CR0 & X86_CR0_WP))
1183 {
1184 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1185 return EMSTATE_REM;
1186 }
1187
1188 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1189 {
1190 Log2(("raw r0 mode forced: patch code\n"));
1191 return EMSTATE_RAW;
1192 }
1193
1194#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1195 if (!(EFlags.u32 & X86_EFL_IF))
1196 {
1197 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1198 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1199 return EMSTATE_REM;
1200 }
1201#endif
1202
1203 /** @todo still necessary??? */
1204 if (EFlags.Bits.u2IOPL != 0)
1205 {
1206 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1207 return EMSTATE_REM;
1208 }
1209 }
1210
1211 Assert(PGMPhysIsA20Enabled(pVCpu));
1212 return EMSTATE_RAW;
1213}
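
/* Illustrative usage sketch (not part of the original file; it mirrors the
 * VINF_EM_RESCHEDULE handling in EMR3ExecuteVM below). The caller hands the
 * cached guest context back to the scheduler and adopts whatever execution
 * state it picks:
 *
 *     EMSTATE enmNew = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
 *     if (enmNew != pVCpu->em.s.enmState)
 *         pVCpu->em.s.enmState = enmNew;
 */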
1214
1215
1216/**
1217 * Executes all high priority post execution force actions.
1218 *
1219 * @returns rc or a fatal status code.
1220 *
1221 * @param pVM VM handle.
1222 * @param pVCpu VMCPU handle.
1223 * @param rc The current rc.
1224 */
1225int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1226{
1227 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1228 PDMCritSectFF(pVCpu);
1229
1230 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1231 CSAMR3DoPendingAction(pVM, pVCpu);
1232
1233 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1234 {
1235 if ( rc > VINF_EM_NO_MEMORY
1236 && rc <= VINF_EM_LAST)
1237 rc = VINF_EM_NO_MEMORY;
1238 }
1239
1240 return rc;
1241}
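
/* Illustrative call site (an assumption; the inner execution loops are not
 * part of this excerpt). The function is intended to run right after guest
 * execution returns, whenever high priority post-execution force action
 * flags are raised:
 *
 *     if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
 *         ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
 *         rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
 */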
1242
1243
1244/**
1245 * Executes all pending forced actions.
1246 *
1247 * Forced actions can cause execution delays and execution
1248 * rescheduling. The former we deal with using action priority, so
1249 * that, for instance, pending timers aren't scheduled and run until
1250 * right before execution. The rescheduling we deal with using
1251 * return codes. The same goes for VM termination, only in that case
1252 * we exit everything.
1253 *
1254 * @returns VBox status code of equal or greater importance/severity than rc.
1255 * The most important ones are: VINF_EM_RESCHEDULE,
1256 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1257 *
1258 * @param pVM VM handle.
1259 * @param pVCpu VMCPU handle.
1260 * @param rc The current rc.
1261 *
1262 */
1263int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1264{
1265 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1266#ifdef VBOX_STRICT
1267 int rcIrq = VINF_SUCCESS;
1268#endif
1269 int rc2;
1270#define UPDATE_RC() \
1271 do { \
1272 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1273 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1274 break; \
1275 if (!rc || rc2 < rc) \
1276 rc = rc2; \
1277 } while (0)
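
/* Illustrative note (not part of the original source): UPDATE_RC() merges
 * rc2 into rc, keeping the more important code. VINF_SUCCESS never
 * overrides anything, failures already in rc are never replaced, and
 * among the VINF_EM_* codes the lower value wins. Assuming the usual
 * ordering (VINF_EM_SUSPEND < VINF_EM_RESCHEDULE):
 *
 *     rc = VINF_SUCCESS;
 *     rc2 = VINF_EM_RESCHEDULE; UPDATE_RC();  // rc = VINF_EM_RESCHEDULE
 *     rc2 = VINF_EM_SUSPEND;    UPDATE_RC();  // rc = VINF_EM_SUSPEND
 *     rc2 = VINF_EM_RESCHEDULE; UPDATE_RC();  // rc stays VINF_EM_SUSPEND
 */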
1278
1279 /*
1280 * Post execution chunk first.
1281 */
1282 if ( VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1283 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK))
1284 {
1285 /*
1286 * EMT Rendezvous (must be serviced before termination).
1287 */
1288 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1289 {
1290 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1291 UPDATE_RC();
1292            /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1293 * stopped/reset before the next VM state change is made. We need a better
1294 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1295 * && rc >= VINF_EM_SUSPEND). */
1296 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1297 {
1298 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1299 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1300 return rc;
1301 }
1302 }
1303
1304 /*
1305 * Termination request.
1306 */
1307 if (VM_FF_ISPENDING(pVM, VM_FF_TERMINATE))
1308 {
1309 Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
1310 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1311 return VINF_EM_TERMINATE;
1312 }
1313
1314 /*
1315 * Debugger Facility polling.
1316 */
1317 if (VM_FF_ISPENDING(pVM, VM_FF_DBGF))
1318 {
1319 rc2 = DBGFR3VMMForcedAction(pVM);
1320 UPDATE_RC();
1321 }
1322
1323 /*
1324 * Postponed reset request.
1325 */
1326 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET))
1327 {
1328 rc2 = VMR3Reset(pVM);
1329 UPDATE_RC();
1330 }
1331
1332 /*
1333 * CSAM page scanning.
1334 */
1335 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1336 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1337 {
1338 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1339
1340            /** @todo check for 16 or 32 bit code! (D bit in the code selector) */
1341 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1342
1343 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
1344 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1345 }
1346
1347 /*
1348 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1349 */
1350 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1351 {
1352 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1353 UPDATE_RC();
1354 if (rc == VINF_EM_NO_MEMORY)
1355 return rc;
1356 }
1357
1358 /* check that we got them all */
1359 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1360 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_CSAM_SCAN_PAGE);
1361 }
1362
1363 /*
1364 * Normal priority then.
1365 * (Executed in no particular order.)
1366 */
1367 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1368 {
1369 /*
1370 * PDM Queues are pending.
1371 */
1372 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1373 PDMR3QueueFlushAll(pVM);
1374
1375 /*
1376 * PDM DMA transfers are pending.
1377 */
1378 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1379 PDMR3DmaRun(pVM);
1380
1381 /*
1382 * EMT Rendezvous (make sure they are handled before the requests).
1383 */
1384 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1385 {
1386 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1387 UPDATE_RC();
1388            /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1389 * stopped/reset before the next VM state change is made. We need a better
1390 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1391 * && rc >= VINF_EM_SUSPEND). */
1392 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1393 {
1394 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1395 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1396 return rc;
1397 }
1398 }
1399
1400 /*
1401 * Requests from other threads.
1402 */
1403 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1404 {
1405 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY);
1406 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1407 {
1408 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1409 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1410 return rc2;
1411 }
1412 UPDATE_RC();
1413            /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1414 * stopped/reset before the next VM state change is made. We need a better
1415 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1416 * && rc >= VINF_EM_SUSPEND). */
1417 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1418 {
1419 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1420 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1421 return rc;
1422 }
1423 }
1424
1425 /* Replay the handler notification changes. */
1426 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1427 {
1428 /* Try not to cause deadlocks. */
1429 if ( pVM->cCpus == 1
1430 || ( !PGMIsLockOwner(pVM)
1431 && !IOMIsLockOwner(pVM))
1432 )
1433 {
1434 EMRemLock(pVM);
1435 REMR3ReplayHandlerNotifications(pVM);
1436 EMRemUnlock(pVM);
1437 }
1438 }
1439
1440 /* check that we got them all */
1441 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1442 }
1443
1444 /*
1445 * Normal priority then. (per-VCPU)
1446 * (Executed in no particular order.)
1447 */
1448 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1449 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1450 {
1451 /*
1452 * Requests from other threads.
1453 */
1454 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
1455 {
1456 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu);
1457 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1458 {
1459 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1460 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1461 return rc2;
1462 }
1463 UPDATE_RC();
1464            /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1465 * stopped/reset before the next VM state change is made. We need a better
1466 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1467 * && rc >= VINF_EM_SUSPEND). */
1468 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1469 {
1470 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1471 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1472 return rc;
1473 }
1474 }
1475
1476 /* check that we got them all */
1477 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST)));
1478 }
1479
1480 /*
1481 * High priority pre execution chunk last.
1482 * (Executed in ascending priority order.)
1483 */
1484 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1485 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1486 {
1487 /*
1488 * Timers before interrupts.
1489 */
1490 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)
1491 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1492 TMR3TimerQueuesDo(pVM);
1493
1494 /*
1495 * The instruction following an emulated STI should *always* be executed!
1496 */
1497 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1498 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1499 {
1500 Log(("VM_FF_EMULATED_STI at %RGv successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1501 if (CPUMGetGuestEIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1502 {
1503                /* Note: we intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if the eip is the same as the inhibited instr address.
1504                 *  Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
1505                 *  force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
1506                 *  break the guest. Sounds very unlikely, but such timing sensitive problems are not as rare as you might think.
1507 */
1508 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1509 }
1510 if (HWACCMR3IsActive(pVCpu))
1511 rc2 = VINF_EM_RESCHEDULE_HWACC;
1512 else
1513 rc2 = PATMAreInterruptsEnabled(pVM) ? VINF_EM_RESCHEDULE_RAW : VINF_EM_RESCHEDULE_REM;
1514
1515 UPDATE_RC();
1516 }
1517
1518 /*
1519 * Interrupts.
1520 */
1521 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1522 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1523 && (!rc || rc >= VINF_EM_RESCHEDULE_HWACC)
1524 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
1525 && PATMAreInterruptsEnabled(pVM)
1526 && !HWACCMR3IsEventPending(pVCpu))
1527 {
1528 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1529 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1530 {
1531 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1532 /** @todo this really isn't nice, should properly handle this */
1533 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1534#ifdef VBOX_STRICT
1535 rcIrq = rc2;
1536#endif
1537 UPDATE_RC();
1538 }
1539 /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
1540 else if (REMR3QueryPendingInterrupt(pVM, pVCpu) != REM_NO_PENDING_IRQ)
1541 {
1542 rc2 = VINF_EM_RESCHEDULE_REM;
1543 UPDATE_RC();
1544 }
1545 }
1546
1547 /*
1548 * Allocate handy pages.
1549 */
1550 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1551 {
1552 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1553 UPDATE_RC();
1554 }
1555
1556 /*
1557 * Debugger Facility request.
1558 */
1559 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_DBGF, VM_FF_PGM_NO_MEMORY))
1560 {
1561 rc2 = DBGFR3VMMForcedAction(pVM);
1562 UPDATE_RC();
1563 }
1564
1565 /*
1566 * EMT Rendezvous (must be serviced before termination).
1567 */
1568 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1569 {
1570 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1571 UPDATE_RC();
1572            /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1573 * stopped/reset before the next VM state change is made. We need a better
1574 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1575 * && rc >= VINF_EM_SUSPEND). */
1576 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1577 {
1578 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1579 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1580 return rc;
1581 }
1582 }
1583
1584 /*
1585 * Termination request.
1586 */
1587 if (VM_FF_ISPENDING(pVM, VM_FF_TERMINATE))
1588 {
1589 Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
1590 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1591 return VINF_EM_TERMINATE;
1592 }
1593
1594 /*
1595 * Out of memory? Since most of our fellow high priority actions may cause us
1596 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
1597 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
1598 * than us since we can terminate without allocating more memory.
1599 */
1600 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1601 {
1602 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1603 UPDATE_RC();
1604 if (rc == VINF_EM_NO_MEMORY)
1605 return rc;
1606 }
1607
1608 /*
1609 * If the virtual sync clock is still stopped, make TM restart it.
1610 */
1611 if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
1612 TMR3VirtualSyncFF(pVM, pVCpu);
1613
1614#ifdef DEBUG
1615 /*
1616 * Debug, pause the VM.
1617 */
1618 if (VM_FF_ISPENDING(pVM, VM_FF_DEBUG_SUSPEND))
1619 {
1620 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
1621 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
1622 return VINF_EM_SUSPEND;
1623 }
1624#endif
1625
1626 /* check that we got them all */
1627 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1628 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS));
1629 }
1630
1631#undef UPDATE_RC
1632 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1633 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1634 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
1635 return rc;
1636}
1637
1638/**
1639 * Release the REM lock (CritSectREM) if owned by the current VCPU.
1640 *
1641 * @param pVM The VM to operate on.
1642 */
1643VMMR3DECL(void) EMR3ReleaseOwnedLocks(PVM pVM)
1644{
1645 while (PDMCritSectIsOwner(&pVM->em.s.CritSectREM))
1646 PDMCritSectLeave(&pVM->em.s.CritSectREM);
1647}
1648
1649
1650/**
1651 * Execute VM.
1652 *
1653 * This function is the main loop of the VM. The emulation thread
1654 * calls this function when the VM has been successfully constructed
1655 * and we're ready for executing the VM.
1656 *
1657 * Returning from this function means that the VM is turned off or
1658 * suspended (state already saved) and destruction is next in line.
1659 *
1660 * All interaction from other threads is done using forced actions
1661 * and signaling of the wait object.
1662 *
1663 * @returns VBox status code, informational status codes may indicate failure.
1664 * @param pVM The VM to operate on.
1665 * @param pVCpu The VMCPU to operate on.
1666 */
1667VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
1668{
1669 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
1670 pVM,
1671 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
1672 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
1673 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
1674 pVCpu->em.s.fForceRAW));
1675 VM_ASSERT_EMT(pVM);
1676 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
1677 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
1678 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
1679 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
1680
1681 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
1682 if (rc == 0)
1683 {
1684 /*
1685 * Start the virtual time.
1686 */
1687 TMR3NotifyResume(pVM, pVCpu);
1688
1689 /*
1690 * The Outer Main Loop.
1691 */
1692 bool fFFDone = false;
1693
1694 /* Reschedule right away to start in the right state. */
1695 rc = VINF_SUCCESS;
1696
1697        /* If resuming after a pause or a state load, restore the previous
1698           state so we don't start executing code. Otherwise, just reschedule. */
1699 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
1700 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
1701 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
1702 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
1703 else
1704 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1705
1706 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
1707 for (;;)
1708 {
1709 /*
1710 * Before we can schedule anything (we're here because
1711 * scheduling is required) we must service any pending
1712 * forced actions to avoid any pending action causing
1713             * immediate rescheduling upon entering an inner loop.
1714 *
1715 * Do forced actions.
1716 */
1717 if ( !fFFDone
1718 && rc != VINF_EM_TERMINATE
1719 && rc != VINF_EM_OFF
1720 && ( VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK)
1721 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_BUT_RAW_MASK)))
1722 {
1723 rc = emR3ForcedActions(pVM, pVCpu, rc);
1724 if ( ( rc == VINF_EM_RESCHEDULE_REM
1725 || rc == VINF_EM_RESCHEDULE_HWACC)
1726 && pVCpu->em.s.fForceRAW)
1727 rc = VINF_EM_RESCHEDULE_RAW;
1728 }
1729 else if (fFFDone)
1730 fFFDone = false;
1731
1732 /*
1733 * Now what to do?
1734 */
1735 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
1736 switch (rc)
1737 {
1738 /*
1739 * Keep doing what we're currently doing.
1740 */
1741 case VINF_SUCCESS:
1742 break;
1743
1744 /*
1745 * Reschedule - to raw-mode execution.
1746 */
1747 case VINF_EM_RESCHEDULE_RAW:
1748 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", pVCpu->em.s.enmState, EMSTATE_RAW));
1749 pVCpu->em.s.enmState = EMSTATE_RAW;
1750 break;
1751
1752 /*
1753 * Reschedule - to hardware accelerated raw-mode execution.
1754 */
1755 case VINF_EM_RESCHEDULE_HWACC:
1756 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HWACC: %d -> %d (EMSTATE_HWACC)\n", pVCpu->em.s.enmState, EMSTATE_HWACC));
1757 Assert(!pVCpu->em.s.fForceRAW);
1758 pVCpu->em.s.enmState = EMSTATE_HWACC;
1759 break;
1760
1761 /*
1762 * Reschedule - to recompiled execution.
1763 */
1764 case VINF_EM_RESCHEDULE_REM:
1765 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", pVCpu->em.s.enmState, EMSTATE_REM));
1766 pVCpu->em.s.enmState = EMSTATE_REM;
1767 break;
1768
1769#ifdef VBOX_WITH_VMI
1770 /*
1771 * Reschedule - parav call.
1772 */
1773 case VINF_EM_RESCHEDULE_PARAV:
1774 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_PARAV: %d -> %d (EMSTATE_PARAV)\n", pVCpu->em.s.enmState, EMSTATE_PARAV));
1775 pVCpu->em.s.enmState = EMSTATE_PARAV;
1776 break;
1777#endif
1778
1779 /*
1780 * Resume.
1781 */
1782 case VINF_EM_RESUME:
1783 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", pVCpu->em.s.enmState));
1784 /* Don't reschedule in the halted or wait for SIPI case. */
1785 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
1786 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
1787 break;
1788 /* fall through and get scheduled. */
1789
1790 /*
1791 * Reschedule.
1792 */
1793 case VINF_EM_RESCHEDULE:
1794 {
1795 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1796 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", pVCpu->em.s.enmState, enmState, emR3GetStateName(enmState)));
1797 pVCpu->em.s.enmState = enmState;
1798 break;
1799 }
1800
1801 /*
1802 * Halted.
1803 */
1804 case VINF_EM_HALT:
1805 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_HALTED));
1806 pVCpu->em.s.enmState = EMSTATE_HALTED;
1807 break;
1808
1809 /*
1810 * Switch to the wait for SIPI state (application processor only)
1811 */
1812 case VINF_EM_WAIT_SIPI:
1813 Assert(pVCpu->idCpu != 0);
1814 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_WAIT_SIPI));
1815 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
1816 break;
1817
1818
1819 /*
1820 * Suspend.
1821 */
1822 case VINF_EM_SUSPEND:
1823 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_SUSPENDED));
1824 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1825 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
1826 break;
1827
1828 /*
1829 * Reset.
1830             * We might end up doing a double reset for now; we'll have to clean up the mess later.
1831 */
1832 case VINF_EM_RESET:
1833 {
1834 if (pVCpu->idCpu == 0)
1835 {
1836 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1837 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", pVCpu->em.s.enmState, enmState, emR3GetStateName(enmState)));
1838 pVCpu->em.s.enmState = enmState;
1839 }
1840 else
1841 {
1842 /* All other VCPUs go into the wait for SIPI state. */
1843 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
1844 }
1845 break;
1846 }
1847
1848 /*
1849 * Power Off.
1850 */
1851 case VINF_EM_OFF:
1852                Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", pVCpu->em.s.enmState, EMSTATE_TERMINATING));
1853                pVCpu->em.s.enmState = EMSTATE_TERMINATING;
1854 TMR3NotifySuspend(pVM, pVCpu);
1855 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
1856 return rc;
1857
1858 /*
1859 * Terminate the VM.
1860 */
1861 case VINF_EM_TERMINATE:
1862                Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", pVCpu->em.s.enmState, EMSTATE_TERMINATING));
1863                pVCpu->em.s.enmState = EMSTATE_TERMINATING;
1864 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
1865 TMR3NotifySuspend(pVM, pVCpu);
1866 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
1867 return rc;
1868
1869
1870 /*
1871 * Out of memory, suspend the VM and stuff.
1872 */
1873 case VINF_EM_NO_MEMORY:
1874 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_SUSPENDED));
1875 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1876 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
1877 TMR3NotifySuspend(pVM, pVCpu);
1878 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
1879
1880 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
1881 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
1882 if (rc != VINF_EM_SUSPEND)
1883 {
1884 if (RT_SUCCESS_NP(rc))
1885 {
1886 AssertLogRelMsgFailed(("%Rrc\n", rc));
1887 rc = VERR_EM_INTERNAL_ERROR;
1888 }
1889 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
1890 }
1891 return rc;
1892
1893 /*
1894 * Guest debug events.
1895 */
1896 case VINF_EM_DBG_STEPPED:
1897 AssertMsgFailed(("VINF_EM_DBG_STEPPED cannot be here!"));
1898 case VINF_EM_DBG_STOP:
1899 case VINF_EM_DBG_BREAKPOINT:
1900 case VINF_EM_DBG_STEP:
1901 if (pVCpu->em.s.enmState == EMSTATE_RAW)
1902 {
1903 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_GUEST_RAW));
1904 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
1905 }
1906 else
1907 {
1908 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_GUEST_REM));
1909 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1910 }
1911 break;
1912
1913 /*
1914 * Hypervisor debug events.
1915 */
1916 case VINF_EM_DBG_HYPER_STEPPED:
1917 case VINF_EM_DBG_HYPER_BREAKPOINT:
1918 case VINF_EM_DBG_HYPER_ASSERTION:
1919 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_HYPER));
1920 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
1921 break;
1922
1923 /*
1924             * Guru meditations.
1925 */
1926 case VERR_VMM_RING0_ASSERTION:
1927 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, pVCpu->em.s.enmState, EMSTATE_GURU_MEDITATION));
1928 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
1929 break;
1930
1931 /*
1932 * Any error code showing up here other than the ones we
1933             * know and process above is considered to be FATAL.
1934 *
1935 * Unknown warnings and informational status codes are also
1936 * included in this.
1937 */
1938 default:
1939 if (RT_SUCCESS_NP(rc))
1940 {
1941 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
1942 rc = VERR_EM_INTERNAL_ERROR;
1943 }
1944 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, pVCpu->em.s.enmState, EMSTATE_GURU_MEDITATION));
1945 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
1946 break;
1947 }
1948
1949 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
1950 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
1951
1952 /*
1953 * Act on the state.
1954 */
1955 switch (pVCpu->em.s.enmState)
1956 {
1957 /*
1958 * Execute raw.
1959 */
1960 case EMSTATE_RAW:
1961 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
1962 break;
1963
1964 /*
1965 * Execute hardware accelerated raw.
1966 */
1967 case EMSTATE_HWACC:
1968 rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone);
1969 break;
1970
1971 /*
1972 * Execute recompiled.
1973 */
1974 case EMSTATE_REM:
1975 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
1976 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
1977 break;
1978
1979#ifdef VBOX_WITH_VMI
1980 /*
1981 * Execute PARAV function.
1982 */
1983 case EMSTATE_PARAV:
1984 rc = PARAVCallFunction(pVM);
1985 pVCpu->em.s.enmState = EMSTATE_REM;
1986 break;
1987#endif
1988
1989 /*
1990 * Application processor execution halted until SIPI.
1991 */
1992 case EMSTATE_WAIT_SIPI:
1993 /* no break */
1994 /*
1995 * hlt - execution halted until interrupt.
1996 */
1997 case EMSTATE_HALTED:
1998 {
1999 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2000 if (pVCpu->em.s.mwait.fWait & EMMWAIT_FLAG_ACTIVE)
2001 {
2002 /* mwait has a special extension where it's woken up when an interrupt is pending even when IF=0. */
2003 rc = VMR3WaitHalted(pVM, pVCpu, !(pVCpu->em.s.mwait.fWait & EMMWAIT_FLAG_BREAKIRQIF0) && !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2004 pVCpu->em.s.mwait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2005 }
2006 else
2007 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2008
2009 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2010 break;
2011 }
2012
2013 /*
2014 * Suspended - return to VM.cpp.
2015 */
2016 case EMSTATE_SUSPENDED:
2017 TMR3NotifySuspend(pVM, pVCpu);
2018 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2019 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2020 return VINF_EM_SUSPEND;
2021
2022 /*
2023 * Debugging in the guest.
2024 */
2025 case EMSTATE_DEBUG_GUEST_REM:
2026 case EMSTATE_DEBUG_GUEST_RAW:
2027 TMR3NotifySuspend(pVM, pVCpu);
2028 rc = emR3Debug(pVM, pVCpu, rc);
2029 TMR3NotifyResume(pVM, pVCpu);
2030                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2031 break;
2032
2033 /*
2034 * Debugging in the hypervisor.
2035 */
2036 case EMSTATE_DEBUG_HYPER:
2037 {
2038 TMR3NotifySuspend(pVM, pVCpu);
2039 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2040
2041 rc = emR3Debug(pVM, pVCpu, rc);
2042                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2043 if (rc != VINF_SUCCESS)
2044 {
2045 /* switch to guru meditation mode */
2046 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2047 VMMR3FatalDump(pVM, pVCpu, rc);
2048 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2049 return rc;
2050 }
2051
2052 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2053 TMR3NotifyResume(pVM, pVCpu);
2054 break;
2055 }
2056
2057 /*
2058 * Guru meditation takes place in the debugger.
2059 */
2060 case EMSTATE_GURU_MEDITATION:
2061 {
2062 TMR3NotifySuspend(pVM, pVCpu);
2063 VMMR3FatalDump(pVM, pVCpu, rc);
2064 emR3Debug(pVM, pVCpu, rc);
2065 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2066 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2067 return rc;
2068 }
2069
2070 /*
2071 * The states we don't expect here.
2072 */
2073 case EMSTATE_NONE:
2074 case EMSTATE_TERMINATING:
2075 default:
2076 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2077 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2078 TMR3NotifySuspend(pVM, pVCpu);
2079 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2080 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2081 return VERR_EM_INTERNAL_ERROR;
2082 }
2083 } /* The Outer Main Loop */
2084 }
2085 else
2086 {
2087 /*
2088 * Fatal error.
2089 */
2090 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2091 TMR3NotifySuspend(pVM, pVCpu);
2092 VMMR3FatalDump(pVM, pVCpu, rc);
2093 emR3Debug(pVM, pVCpu, rc);
2094 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2095 /** @todo change the VM state! */
2096 return rc;
2097 }
2098
2099 /* (won't ever get here). */
2100 AssertFailed();
2101}
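
/* Minimal usage sketch (hedged; the actual caller is the emulation thread
 * loop, which wraps this with request servicing and VM state transitions):
 *
 *     int rc = EMR3ExecuteVM(pVM, pVCpu);
 *     // VINF_EM_SUSPEND, VINF_EM_OFF and VINF_EM_TERMINATE hand control
 *     // back to the VM state machinery; other codes indicate an error.
 */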
2102