VirtualBox

source: vbox/trunk/src/VBox/VMM/EM.cpp@28549

Last change on this file since 28549 was 27493, checked in by vboxsync, 15 years ago

EM: typo in stats name

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 98.5 KB
1/* $Id: EM.cpp 27493 2010-03-18 17:21:43Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/** @page pg_em EM - The Execution Monitor / Manager
23 *
24 * The Execution Monitor/Manager is responsible for running the VM, scheduling
25 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
26 * Interpreted), and keeping the CPU states in sync. The function
27 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
28 * modes has different inner loops (emR3RawExecute, emR3HwAccExecute, and
29 * emR3RemExecute).
30 *
31 * The interpreted execution is only used to avoid switching between
32 * raw-mode/hwaccm and the recompiler when fielding virtualization traps/faults.
33 * The interpretation is thus implemented as part of EM.
34 *
35 * @see grp_em
36 */
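/* Illustrative sketch (not part of the original file): the rough shape of the
 * EMR3ExecuteVM() dispatch described above, using the inner-loop functions
 * named in the comment. Error handling, forced actions and the many state
 * transitions of the real loop are omitted.
 *
 *     for (;;)
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_RAW:   rc = emR3RawExecute(pVM, pVCpu, &fFFDone);   break;
 *             case EMSTATE_HWACC: rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_REM:   rc = emR3RemExecute(pVM, pVCpu, &fFFDone);   break;
 *             default:            break; // halted, suspended, debugging, guru, ...
 *         }
 */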
37
38/*******************************************************************************
39* Header Files *
40*******************************************************************************/
41#define LOG_GROUP LOG_GROUP_EM
42#include <VBox/em.h>
43#include <VBox/vmm.h>
44#ifdef VBOX_WITH_VMI
45# include <VBox/parav.h>
46#endif
47#include <VBox/patm.h>
48#include <VBox/csam.h>
49#include <VBox/selm.h>
50#include <VBox/trpm.h>
51#include <VBox/iom.h>
52#include <VBox/dbgf.h>
53#include <VBox/pgm.h>
54#include <VBox/rem.h>
55#include <VBox/tm.h>
56#include <VBox/mm.h>
57#include <VBox/ssm.h>
58#include <VBox/pdmapi.h>
59#include <VBox/pdmcritsect.h>
60#include <VBox/pdmqueue.h>
61#include <VBox/hwaccm.h>
62#include <VBox/patm.h>
63#include "EMInternal.h"
64#include <VBox/vm.h>
65#include <VBox/cpumdis.h>
66#include <VBox/dis.h>
67#include <VBox/disopcode.h>
68#include <VBox/dbgf.h>
69
70#include <iprt/string.h>
71#include <iprt/stream.h>
72
73
74/*******************************************************************************
75* Defined Constants And Macros *
76*******************************************************************************/
77#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
78#define EM_NOTIFY_HWACCM
79#endif
80
81
82/*******************************************************************************
83* Internal Functions *
84*******************************************************************************/
85static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
86static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
87static const char *emR3GetStateName(EMSTATE enmState);
88static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc);
89static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
90static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
91int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
92
93
94/**
95 * Initializes the EM.
96 *
97 * @returns VBox status code.
98 * @param pVM The VM to operate on.
99 */
100VMMR3DECL(int) EMR3Init(PVM pVM)
101{
102 LogFlow(("EMR3Init\n"));
103 /*
104 * Assert alignment and sizes.
105 */
106 AssertCompileMemberAlignment(VM, em.s, 32);
107 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
108 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
109 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
110
111 /*
112 * Init the structure.
113 */
114 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
115 int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR3Enabled", &pVM->fRawR3Enabled);
116 if (RT_FAILURE(rc))
117 pVM->fRawR3Enabled = true;
118 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR0Enabled", &pVM->fRawR0Enabled);
119 if (RT_FAILURE(rc))
120 pVM->fRawR0Enabled = true;
121 Log(("EMR3Init: fRawR3Enabled=%d fRawR0Enabled=%d\n", pVM->fRawR3Enabled, pVM->fRawR0Enabled));
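/* Note (illustrative, not from the original file): these two CFGM keys sit at
 * the root of the configuration tree, so, assuming the usual mapping of
 * "VBoxInternal/" extradata onto the CFGM tree, they can be toggled from the
 * host along the lines of:
 *
 *     VBoxManage setextradata "MyVM" "VBoxInternal/RawR3Enabled" 0
 *
 * When a key is absent, the CFGMR3QueryBool() call above fails and the code
 * falls back to the default of true (enabled).
 */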
122
123 /*
124 * Initialize the REM critical section.
125 */
126 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
127 AssertRCReturn(rc, rc);
128
129 /*
130 * Saved state.
131 */
132 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
133 NULL, NULL, NULL,
134 NULL, emR3Save, NULL,
135 NULL, emR3Load, NULL);
136 if (RT_FAILURE(rc))
137 return rc;
138
139 for (VMCPUID i = 0; i < pVM->cCpus; i++)
140 {
141 PVMCPU pVCpu = &pVM->aCpus[i];
142
143 pVCpu->em.s.offVMCPU = RT_OFFSETOF(VMCPU, em.s);
144
145 pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
146 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
147 pVCpu->em.s.fForceRAW = false;
148
149 pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
150 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
151 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
152
153# define EM_REG_COUNTER(a, b, c) \
154 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
155 AssertRC(rc);
156
157# define EM_REG_COUNTER_USED(a, b, c) \
158 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
159 AssertRC(rc);
160
161# define EM_REG_PROFILE(a, b, c) \
162 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
163 AssertRC(rc);
164
165# define EM_REG_PROFILE_ADV(a, b, c) \
166 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
167 AssertRC(rc);
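/* For illustration (expansion shown by hand, not in the original file): the
 * EM_REG_COUNTER() invocation further down,
 *     EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
 * expands to
 *     rc = STAMR3RegisterF(pVM, &pStats->StatRZInterpretFailed, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
 *                          "The number of times an instruction was not interpreted.", "/EM/CPU%d/RZ/Interpret/Failed", i);
 *     AssertRC(rc);
 * so the per-CPU loop index 'i' is substituted for the %d in the sample name.
 */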
168
169 /*
170 * Statistics.
171 */
172#ifdef VBOX_WITH_STATISTICS
173 PEMSTATS pStats;
174 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
175 if (RT_FAILURE(rc))
176 return rc;
177
178 pVCpu->em.s.pStatsR3 = pStats;
179 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
180 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
181
182 EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
183 EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");
184
185 EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
186 EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");
187
188 EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
189 EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
190 EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
191 EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
192 EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
193 EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
194 EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
195 EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
196 EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
197 EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
198 EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
199 EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
200 EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
201 EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
202 EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
203 EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
204 EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
205 EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
206 EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
207 EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
208 EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
209 EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
210 EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
211 EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
212 EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
213 EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
214 EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
215 EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
216 EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
217 EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
218 EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
219 EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
220 EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
221 EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
222 EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
223 EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
224 EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
225 EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
226 EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
227 EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
228 EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
229 EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
230 EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
231 EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
232 EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
233 EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
234 EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
235 EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
236 EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
237 EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
238 EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
239 EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
240 EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
241 EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
242 EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
243 EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
244 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
245 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
246 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
247 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
248 EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
249 EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
250 EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
251 EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
252 EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
253 EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
254 EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
255 EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
256 EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
257 EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
258 EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
259 EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
260
261 EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
262 EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");
263
264 EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
265 EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
266 EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
267 EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
268 EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
269 EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
270 EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
271 EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
272 EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
273 EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
274 EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
275 EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
276 EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
277 EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
278 EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
279 EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
280 EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
281 EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
282 EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
283 EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
284 EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
285 EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
286 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
287 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
288 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
289 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
290 EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
291 EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
292 EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
293 EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
294 EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
295 EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
296 EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
297 EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
298 EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
299 EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
300 EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
301 EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
302 EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
303 EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
304 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
305 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
306 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
307 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
308 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
309 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
310 EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
311 EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
312 EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
313 EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
314
315 EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
316 EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
317 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
318 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
319 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
320 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
321 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
322 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
323 EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
324 EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
325 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
326 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
327 EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
328 EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
329 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
330 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
331 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
332 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
333 EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
334 EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
335 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
336 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
337 EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
338 EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
339 EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
340 EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
341 EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
342 EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
343
344 EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
345 EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
346 EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of instruction prefixes.");
347 EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of instruction prefixes.");
348
349 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
350 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
351 EM_REG_COUNTER_USED(&pStats->StatIn, "/EM/CPU%d/R3/PrivInst/In", "Number of in instructions.");
352 EM_REG_COUNTER_USED(&pStats->StatOut, "/EM/CPU%d/R3/PrivInst/Out", "Number of out instructions.");
353 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "Number of restarted i/o instructions.");
354 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
355 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
356 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
357 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
358 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
359 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
360 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
361 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
362 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
363 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
364 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
365 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
366 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
367 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
368 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
369 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
370 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
371 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
372 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
373 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
374 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
375 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
376
377 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
378 pVCpu->em.s.pCliStatTree = 0;
379
380 /* these should be considered for release statistics. */
381 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
382 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
383 EM_REG_PROFILE(&pVCpu->em.s.StatHwAccEntry, "/PROF/CPU%d/EM/HwAccEnter", "Profiling Hardware Accelerated Mode entry overhead.");
384 EM_REG_PROFILE(&pVCpu->em.s.StatHwAccExec, "/PROF/CPU%d/EM/HwAccExec", "Profiling Hardware Accelerated Mode execution.");
385 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
386 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
387 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
388 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
389 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
390 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
391
392#endif /* VBOX_WITH_STATISTICS */
393
394 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
395 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
396 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
397 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
398
399 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
400 }
401
402 return VINF_SUCCESS;
403}
404
405
406/**
407 * Initializes the per-VCPU EM.
408 *
409 * @returns VBox status code.
410 * @param pVM The VM to operate on.
411 */
412VMMR3DECL(int) EMR3InitCPU(PVM pVM)
413{
414 LogFlow(("EMR3InitCPU\n"));
415 return VINF_SUCCESS;
416}
417
418
419/**
420 * Applies relocations to data and code managed by this
421 * component. This function will be called at init and
422 * whenever the VMM needs to relocate itself inside the GC.
423 *
424 * @param pVM The VM.
425 */
426VMMR3DECL(void) EMR3Relocate(PVM pVM)
427{
428 LogFlow(("EMR3Relocate\n"));
429 for (VMCPUID i = 0; i < pVM->cCpus; i++)
430 {
431 PVMCPU pVCpu = &pVM->aCpus[i];
432 if (pVCpu->em.s.pStatsR3)
433 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
434 }
435}
436
437
438/**
439 * Reset the EM state for a CPU.
440 *
441 * Called by EMR3Reset and hot plugging.
442 *
443 * @param pVCpu The virtual CPU.
444 */
445VMMR3DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
446{
447 pVCpu->em.s.fForceRAW = false;
448
449 /* VMR3Reset may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
450 out of the HALTED state here so that enmPrevState doesn't end up as
451 HALTED when EMR3ExecuteVM returns. */
452 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
453 {
454 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
455 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
456 }
457}
458
459
460/**
461 * Reset notification.
462 *
463 * @param pVM The VM handle.
464 */
465VMMR3DECL(void) EMR3Reset(PVM pVM)
466{
467 Log(("EMR3Reset: \n"));
468 for (VMCPUID i = 0; i < pVM->cCpus; i++)
469 EMR3ResetCpu(&pVM->aCpus[i]);
470}
471
472
473/**
474 * Terminates the EM.
475 *
476 * Termination means cleaning up and freeing all resources,
477 * the VM itself is at this point powered off or suspended.
478 *
479 * @returns VBox status code.
480 * @param pVM The VM to operate on.
481 */
482VMMR3DECL(int) EMR3Term(PVM pVM)
483{
484 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
485
486 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
487 return VINF_SUCCESS;
488}
489
490/**
491 * Terminates the per-VCPU EM.
492 *
493 * Termination means cleaning up and freeing all resources,
494 * the VM itself is at this point powered off or suspended.
495 *
496 * @returns VBox status code.
497 * @param pVM The VM to operate on.
498 */
499VMMR3DECL(int) EMR3TermCPU(PVM pVM)
500{
501 return VINF_SUCCESS;
502}
503
504/**
505 * Execute state save operation.
506 *
507 * @returns VBox status code.
508 * @param pVM VM Handle.
509 * @param pSSM SSM operation handle.
510 */
511static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
512{
513 for (VMCPUID i = 0; i < pVM->cCpus; i++)
514 {
515 PVMCPU pVCpu = &pVM->aCpus[i];
516
517 int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
518 AssertRCReturn(rc, rc);
519
520 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
521 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
522 rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
523 AssertRCReturn(rc, rc);
524
525 /* Save mwait state. */
526 rc = SSMR3PutU32(pSSM, pVCpu->em.s.mwait.fWait);
527 AssertRCReturn(rc, rc);
528 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMWaitEAX);
529 AssertRCReturn(rc, rc);
530 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMWaitECX);
531 AssertRCReturn(rc, rc);
532 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMonitorEAX);
533 AssertRCReturn(rc, rc);
534 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMonitorECX);
535 AssertRCReturn(rc, rc);
536 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMonitorEDX);
537 AssertRCReturn(rc, rc);
538 }
539 return VINF_SUCCESS;
540}
541
542
543/**
544 * Execute state load operation.
545 *
546 * @returns VBox status code.
547 * @param pVM VM Handle.
548 * @param pSSM SSM operation handle.
549 * @param uVersion Data layout version.
550 * @param uPass The data pass.
551 */
552static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
553{
554 /*
555 * Validate version.
556 */
557 if ( uVersion != EM_SAVED_STATE_VERSION
558 && uVersion != EM_SAVED_STATE_VERSION_PRE_MWAIT
559 && uVersion != EM_SAVED_STATE_VERSION_PRE_SMP)
560 {
561 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
562 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
563 }
564 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
565
566 /*
567 * Load the saved state.
568 */
569 for (VMCPUID i = 0; i < pVM->cCpus; i++)
570 {
571 PVMCPU pVCpu = &pVM->aCpus[i];
572
573 int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
574 if (RT_FAILURE(rc))
575 pVCpu->em.s.fForceRAW = false;
576 AssertRCReturn(rc, rc);
577
578 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
579 {
580 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
581 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
582 AssertRCReturn(rc, rc);
583 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
584
585 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
586 }
587 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
588 {
589 /* Load mwait state. */
590 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.mwait.fWait);
591 AssertRCReturn(rc, rc);
592 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMWaitEAX);
593 AssertRCReturn(rc, rc);
594 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMWaitECX);
595 AssertRCReturn(rc, rc);
596 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMonitorEAX);
597 AssertRCReturn(rc, rc);
598 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMonitorECX);
599 AssertRCReturn(rc, rc);
600 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMonitorEDX);
601 AssertRCReturn(rc, rc);
602 }
603
604 Assert(!pVCpu->em.s.pCliStatTree);
605 }
606 return VINF_SUCCESS;
607}
608
609
610/**
611 * Raise a fatal error.
612 *
613 * Safely terminate the VM with full state report and stuff. This function
614 * will naturally never return.
615 *
616 * @param pVCpu VMCPU handle.
617 * @param rc VBox status code.
618 */
619VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
620{
621 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
622 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
623 AssertReleaseMsgFailed(("longjmp returned!\n"));
624}
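/* Usage sketch (not from the original file): a fatal inner-loop path bails
 * out with, e.g.,
 *     EMR3FatalError(pVCpu, VERR_INTERNAL_ERROR);
 * and execution reappears at the matching setjmp() on
 * pVCpu->em.s.u.FatalLongJump, which is assumed to be armed by
 * EMR3ExecuteVM() before entering the inner loops.
 */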
625
626
627/**
628 * Gets the EM state name.
629 *
630 * @returns Pointer to a read-only state name.
631 * @param enmState The state.
632 */
633static const char *emR3GetStateName(EMSTATE enmState)
634{
635 switch (enmState)
636 {
637 case EMSTATE_NONE: return "EMSTATE_NONE";
638 case EMSTATE_RAW: return "EMSTATE_RAW";
639 case EMSTATE_HWACC: return "EMSTATE_HWACC";
640 case EMSTATE_REM: return "EMSTATE_REM";
641 case EMSTATE_PARAV: return "EMSTATE_PARAV";
642 case EMSTATE_HALTED: return "EMSTATE_HALTED";
643 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
644 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
645 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
646 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
647 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
648 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
649 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
650 default: return "Unknown!";
651 }
652}
653
654
655/**
656 * Debug loop.
657 *
658 * @returns VBox status code for EM.
659 * @param pVM VM handle.
660 * @param pVCpu VMCPU handle.
661 * @param rc Current EM VBox status code.
662 */
663static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc)
664{
665 for (;;)
666 {
667 Log(("emR3Debug: rc=%Rrc\n", rc));
668 const int rcLast = rc;
669
670 /*
671 * Debug related RC.
672 */
673 switch (rc)
674 {
675 /*
676 * Single step an instruction.
677 */
678 case VINF_EM_DBG_STEP:
679 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
680 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
681 || pVCpu->em.s.fForceRAW /* paranoia */)
682 rc = emR3RawStep(pVM, pVCpu);
683 else
684 {
685 Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
686 rc = emR3RemStep(pVM, pVCpu);
687 }
688 break;
689
690 /*
691 * Simple events: stepped, breakpoint, stop/assertion.
692 */
693 case VINF_EM_DBG_STEPPED:
694 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
695 break;
696
697 case VINF_EM_DBG_BREAKPOINT:
698 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
699 break;
700
701 case VINF_EM_DBG_STOP:
702 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
703 break;
704
705 case VINF_EM_DBG_HYPER_STEPPED:
706 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
707 break;
708
709 case VINF_EM_DBG_HYPER_BREAKPOINT:
710 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
711 break;
712
713 case VINF_EM_DBG_HYPER_ASSERTION:
714 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
715 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
716 break;
717
718 /*
719 * Guru meditation.
720 */
721 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
722 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
723 break;
724 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
725 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
726 break;
727
728 default: /** @todo don't use default for guru, but make special error codes! */
729 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
730 break;
731 }
732
733 /*
734 * Process the result.
735 */
736 do
737 {
738 switch (rc)
739 {
740 /*
741 * Continue the debugging loop.
742 */
743 case VINF_EM_DBG_STEP:
744 case VINF_EM_DBG_STOP:
745 case VINF_EM_DBG_STEPPED:
746 case VINF_EM_DBG_BREAKPOINT:
747 case VINF_EM_DBG_HYPER_STEPPED:
748 case VINF_EM_DBG_HYPER_BREAKPOINT:
749 case VINF_EM_DBG_HYPER_ASSERTION:
750 break;
751
752 /*
753 * Resuming execution (in some form) has to be done here if we got
754 * a hypervisor debug event.
755 */
756 case VINF_SUCCESS:
757 case VINF_EM_RESUME:
758 case VINF_EM_SUSPEND:
759 case VINF_EM_RESCHEDULE:
760 case VINF_EM_RESCHEDULE_RAW:
761 case VINF_EM_RESCHEDULE_REM:
762 case VINF_EM_HALT:
763 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
764 {
765 rc = emR3RawResumeHyper(pVM, pVCpu);
766 if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
767 continue;
768 }
769 if (rc == VINF_SUCCESS)
770 rc = VINF_EM_RESCHEDULE;
771 return rc;
772
773 /*
774 * The debugger isn't attached.
775 * We'll simply turn the thing off since that's the easiest thing to do.
776 */
777 case VERR_DBGF_NOT_ATTACHED:
778 switch (rcLast)
779 {
780 case VINF_EM_DBG_HYPER_STEPPED:
781 case VINF_EM_DBG_HYPER_BREAKPOINT:
782 case VINF_EM_DBG_HYPER_ASSERTION:
783 case VERR_TRPM_PANIC:
784 case VERR_TRPM_DONT_PANIC:
785 case VERR_VMM_RING0_ASSERTION:
786 case VERR_VMM_HYPER_CR3_MISMATCH:
787 case VERR_VMM_RING3_CALL_DISABLED:
788 return rcLast;
789 }
790 return VINF_EM_OFF;
791
792 /*
793 * Status codes terminating the VM in one or another sense.
794 */
795 case VINF_EM_TERMINATE:
796 case VINF_EM_OFF:
797 case VINF_EM_RESET:
798 case VINF_EM_NO_MEMORY:
799 case VINF_EM_RAW_STALE_SELECTOR:
800 case VINF_EM_RAW_IRET_TRAP:
801 case VERR_TRPM_PANIC:
802 case VERR_TRPM_DONT_PANIC:
803 case VERR_VMM_RING0_ASSERTION:
804 case VERR_VMM_HYPER_CR3_MISMATCH:
805 case VERR_VMM_RING3_CALL_DISABLED:
806 case VERR_INTERNAL_ERROR:
807 case VERR_INTERNAL_ERROR_2:
808 case VERR_INTERNAL_ERROR_3:
809 case VERR_INTERNAL_ERROR_4:
810 case VERR_INTERNAL_ERROR_5:
811 case VERR_IPE_UNEXPECTED_STATUS:
812 case VERR_IPE_UNEXPECTED_INFO_STATUS:
813 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
814 return rc;
815
816 /*
817 * The rest is unexpected, and will keep us here.
818 */
819 default:
820 AssertMsgFailed(("Unexpected rc %Rrc!\n", rc));
821 break;
822 }
823 } while (false);
824 } /* debug forever */
825}
826
827/**
828 * Steps recompiled code.
829 *
830 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
831 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
832 *
833 * @param pVM VM handle.
834 * @param pVCpu VMCPU handle.
835 */
836static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
837{
838 LogFlow(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
839
840 EMRemLock(pVM);
841
842 /*
843 * Switch to REM, step instruction, switch back.
844 */
845 int rc = REMR3State(pVM, pVCpu);
846 if (RT_SUCCESS(rc))
847 {
848 rc = REMR3Step(pVM, pVCpu);
849 REMR3StateBack(pVM, pVCpu);
850 }
851 EMRemUnlock(pVM);
852
853 LogFlow(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
854 return rc;
855}
856
857
858/**
859 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
860 * critical section.
861 *
862 * @returns false - new fInREMState value.
863 * @param pVM The VM handle.
864 * @param pVCpu The virtual CPU handle.
865 */
866DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
867{
868 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
869 REMR3StateBack(pVM, pVCpu);
870 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
871
872 EMRemUnlock(pVM);
873 return false;
874}
875
876
877/**
878 * Executes recompiled code.
879 *
880 * This function contains the recompiler version of the inner
881 * execution loop (the outer loop being in EMR3ExecuteVM()).
882 *
883 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
884 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
885 *
886 * @param pVM VM handle.
887 * @param pVCpu VMCPU handle.
888 * @param pfFFDone Where to store an indicator telling whether or not
889 * FFs were done before returning.
890 *
891 */
892static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
893{
894#ifdef LOG_ENABLED
895 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
896 uint32_t cpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));
897
898 if (pCtx->eflags.Bits.u1VM)
899 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF));
900 else
901 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0));
902#endif
903 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
904
905#if defined(VBOX_STRICT) && defined(DEBUG_bird)
906 AssertMsg( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
907 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo #1419 - get flat address. */
908 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
909#endif
910
911 /*
912 * Spin until we get a forced action that returns anything but VINF_SUCCESS
913 * or the REM suggests raw-mode execution.
914 */
915 *pfFFDone = false;
916 bool fInREMState = false;
917 int rc = VINF_SUCCESS;
918 for (;;)
919 {
920 /*
921 * Lock REM and update the state if not already in sync.
922 *
923 * Note! Big lock, but you are not supposed to own any lock when
924 * coming in here.
925 */
926 if (!fInREMState)
927 {
928 EMRemLock(pVM);
929 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
930
931 /* Flush the recompiler translation blocks if the VCPU has changed;
932 also force a full CPU state resync. */
933 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
934 {
935 REMFlushTBs(pVM);
936 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
937 }
938 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
939
940 rc = REMR3State(pVM, pVCpu);
941
942 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
943 if (RT_FAILURE(rc))
944 break;
945 fInREMState = true;
946
947 /*
948 * We might have missed the raising of VMREQ, TIMER and some other
949 * important FFs while we were busy switching the state. So, check again.
950 */
951 if ( VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_RESET)
952 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
953 {
954 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
955 goto l_REMDoForcedActions;
956 }
957 }
958
959
960 /*
961 * Execute REM.
962 */
963 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
964 rc = REMR3Run(pVM, pVCpu);
965 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
966
967
968 /*
969 * Deal with high priority post execution FFs before doing anything
970 * else. Sync back the state and leave the lock to be on the safe side.
971 */
972 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
973 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
974 {
975 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
976 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
977 }
978
979 /*
980 * Process the returned status code.
981 */
982 if (rc != VINF_SUCCESS)
983 {
984 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
985 break;
986 if (rc != VINF_REM_INTERRUPED_FF)
987 {
988 /*
989 * Anything which is not known to us means an internal error
990 * and the termination of the VM!
991 */
992 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
993 break;
994 }
995 }
996
997
998 /*
999 * Check and execute forced actions.
1000 *
1001 * Sync back the VM state and leave the lock before calling any of
1002 * these; you never know what's going to happen here.
1003 */
1004#ifdef VBOX_HIGH_RES_TIMERS_HACK
1005 TMTimerPollVoid(pVM, pVCpu);
1006#endif
1007 AssertCompile((VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)) & VMCPU_FF_TIMER);
1008 if ( VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK)
1009 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)))
1010 {
1011l_REMDoForcedActions:
1012 if (fInREMState)
1013 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1014 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1015 rc = emR3ForcedActions(pVM, pVCpu, rc);
1016 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1017 if ( rc != VINF_SUCCESS
1018 && rc != VINF_EM_RESCHEDULE_REM)
1019 {
1020 *pfFFDone = true;
1021 break;
1022 }
1023 }
1024
1025 } /* The Inner Loop, recompiled execution mode version. */
1026
1027
1028 /*
1029 * Returning. Sync back the VM state if required.
1030 */
1031 if (fInREMState)
1032 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1033
1034 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1035 return rc;
1036}
1037
1038
1039#ifdef DEBUG
1040
1041int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1042{
1043 EMSTATE enmOldState = pVCpu->em.s.enmState;
1044
1045 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1046
1047 Log(("Single step BEGIN:\n"));
1048 for (uint32_t i = 0; i < cIterations; i++)
1049 {
1050 DBGFR3PrgStep(pVCpu);
1051 DBGFR3DisasInstrCurrentLog(pVCpu, "RSS: ");
1052 emR3RemStep(pVM, pVCpu);
1053 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1054 break;
1055 }
1056 Log(("Single step END:\n"));
1057 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1058 pVCpu->em.s.enmState = enmOldState;
1059 return VINF_EM_RESCHEDULE;
1060}
1061
1062#endif /* DEBUG */
1063
1064
1065/**
1066 * Decides whether to execute RAW, HWACC or REM.
1067 *
1068 * @returns new EM state
1069 * @param pVM The VM.
1070 * @param pVCpu The VMCPU handle.
1071 * @param pCtx The CPU context.
1072 */
1073EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1074{
1075 /*
1076 * When forcing raw-mode execution, things are simple.
1077 */
1078 if (pVCpu->em.s.fForceRAW)
1079 return EMSTATE_RAW;
1080
1081 /*
1082 * We stay in the wait for SIPI state unless explicitly told otherwise.
1083 */
1084 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1085 return EMSTATE_WAIT_SIPI;
1086
1087 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1088 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1089 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1090
1091 X86EFLAGS EFlags = pCtx->eflags;
1092 if (HWACCMIsEnabled(pVM))
1093 {
1094 /* Hardware accelerated raw-mode:
1095 *
1096 * Typically only 32-bit protected-mode code with paging enabled is allowed here.
1097 */
1098 if (HWACCMR3CanExecuteGuest(pVM, pCtx) == true)
1099 return EMSTATE_HWACC;
1100
1101 /* Note: Raw mode and hw accelerated mode are incompatible. The latter turns
1102 * off monitoring features essential for raw mode! */
1103 return EMSTATE_REM;
1104 }
1105
1106 /*
1107 * Standard raw-mode:
1108 *
1109 * Here we only support 16 and 32-bit protected mode ring-3 code with no I/O privileges,
1110 * or 32-bit protected mode ring-0 code.
1111 *
1112 * The tests are ordered by the likelihood of being true during normal execution.
1113 */
1114 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1115 {
1116 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1117 return EMSTATE_REM;
1118 }
1119
1120#ifndef VBOX_RAW_V86
1121 if (EFlags.u32 & X86_EFL_VM) {
1122 Log2(("raw mode refused: VM_MASK\n"));
1123 return EMSTATE_REM;
1124 }
1125#endif
1126
1127 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1128 uint32_t u32CR0 = pCtx->cr0;
1129 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1130 {
1131 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1132 return EMSTATE_REM;
1133 }
1134
1135 if (pCtx->cr4 & X86_CR4_PAE)
1136 {
1137 uint32_t u32Dummy, u32Features;
1138
1139 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1140 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1141 return EMSTATE_REM;
1142 }
1143
1144 unsigned uSS = pCtx->ss;
1145 if ( pCtx->eflags.Bits.u1VM
1146 || (uSS & X86_SEL_RPL) == 3)
1147 {
1148 if (!EMIsRawRing3Enabled(pVM))
1149 return EMSTATE_REM;
1150
1151 if (!(EFlags.u32 & X86_EFL_IF))
1152 {
1153 Log2(("raw mode refused: IF (RawR3)\n"));
1154 return EMSTATE_REM;
1155 }
1156
1157 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1158 {
1159 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1160 return EMSTATE_REM;
1161 }
1162 }
1163 else
1164 {
1165 if (!EMIsRawRing0Enabled(pVM))
1166 return EMSTATE_REM;
1167
1168 /* Only ring 0 supervisor code. */
1169 if ((uSS & X86_SEL_RPL) != 0)
1170 {
1171 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1172 return EMSTATE_REM;
1173 }
1174
1175        // Let's start with pure 32-bit ring-0 code first
1176 /** @todo What's pure 32-bit mode? flat? */
1177 if ( !(pCtx->ssHid.Attr.n.u1DefBig)
1178 || !(pCtx->csHid.Attr.n.u1DefBig))
1179 {
1180 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1181 return EMSTATE_REM;
1182 }
1183
1184 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1185 if (!(u32CR0 & X86_CR0_WP))
1186 {
1187 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1188 return EMSTATE_REM;
1189 }
1190
1191 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1192 {
1193 Log2(("raw r0 mode forced: patch code\n"));
1194 return EMSTATE_RAW;
1195 }
1196
1197#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1198 if (!(EFlags.u32 & X86_EFL_IF))
1199 {
1200 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1201 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1202 return EMSTATE_REM;
1203 }
1204#endif
1205
1206 /** @todo still necessary??? */
1207 if (EFlags.Bits.u2IOPL != 0)
1208 {
1209 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1210 return EMSTATE_REM;
1211 }
1212 }
1213
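    /* Illustrative note: raw mode assumes the A20 gate is enabled (the
     * scheduling rules above are kept in sync with remR3CanExecuteRaw to
     * guarantee this); the A20-off case is left to the recompiler. */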
1214 Assert(PGMPhysIsA20Enabled(pVCpu));
1215 return EMSTATE_RAW;
1216}
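
/* Usage sketch (illustrative, mirroring the VINF_EM_RESCHEDULE handling
 * further down in this file): callers re-run the scheduler whenever a mode
 * change may be needed and act on the verdict:
 *
 *     EMSTATE enmNew = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
 *     if (enmNew != pVCpu->em.s.enmState)
 *         pVCpu->em.s.enmState = enmNew;  // inner loops exit so the outer
 *                                         // loop can enter the new mode
 */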
1217
1218
1219/**
1220 * Executes all high priority post execution force actions.
1221 *
1222 * @returns rc or a fatal status code.
1223 *
1224 * @param pVM VM handle.
1225 * @param pVCpu VMCPU handle.
1226 * @param rc The current rc.
1227 */
1228int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1229{
1230 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1231 PDMCritSectFF(pVCpu);
1232
1233 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1234 CSAMR3DoPendingAction(pVM, pVCpu);
1235
1236 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1237 {
1238 if ( rc > VINF_EM_NO_MEMORY
1239 && rc <= VINF_EM_LAST)
1240 rc = VINF_EM_NO_MEMORY;
1241 }
1242
1243 return rc;
1244}
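
/* Usage sketch (illustrative; the mask names follow the VM_FF_* conventions
 * used elsewhere in this file): the inner execution loops invoke this right
 * after returning from guest context, before the full emR3ForcedActions pass:
 *
 *     if (    VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
 *         ||  VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
 *         rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
 */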
1245
1246
1247/**
1248 * Executes all pending forced actions.
1249 *
1250 * Forced actions can cause execution delays and execution
1251 * rescheduling. The first we deal with using action priority, so
1252 * that for instance pending timers aren't scheduled and run until
1253 * right before execution. The rescheduling we deal with using
1254 * return codes. The same goes for VM termination, only in that case
1255 * we exit everything.
1256 *
1257 * @returns VBox status code of equal or greater importance/severity than rc.
1258 * The most important ones are: VINF_EM_RESCHEDULE,
1259 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1260 *
1261 * @param pVM VM handle.
1262 * @param pVCpu VMCPU handle.
1263 * @param rc The current rc.
1264 *
1265 */
1266int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1267{
1268 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1269#ifdef VBOX_STRICT
1270 int rcIrq = VINF_SUCCESS;
1271#endif
1272 int rc2;
1273#define UPDATE_RC() \
1274 do { \
1275 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1276 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1277 break; \
1278 if (!rc || rc2 < rc) \
1279 rc = rc2; \
1280 } while (0)
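
    /* Illustrative expansion of the merge rule above: within the range
     * VINF_EM_FIRST..VINF_EM_LAST, numerically smaller codes are more
     * important, so UPDATE_RC() keeps the smallest non-zero code seen and
     * never overrides an existing error. The same logic as a hypothetical
     * helper function:
     *
     *     static int rcMerge(int rc, int rc2)
     *     {
     *         if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS)
     *             return rc;             // nothing new, or rc is already an error
     *         if (!rc || rc2 < rc)
     *             return rc2;            // adopt the more important request
     *         return rc;
     *     }
     */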
1281
1282 /*
1283 * Post execution chunk first.
1284 */
1285 if ( VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1286 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK))
1287 {
1288 /*
1289 * EMT Rendezvous (must be serviced before termination).
1290 */
1291 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1292 {
1293 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1294 UPDATE_RC();
1295            /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1296 * stopped/reset before the next VM state change is made. We need a better
1297 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1298 * && rc >= VINF_EM_SUSPEND). */
1299 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1300 {
1301 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1302 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1303 return rc;
1304 }
1305 }
1306
1307 /*
1308 * Termination request.
1309 */
1310 if (VM_FF_ISPENDING(pVM, VM_FF_TERMINATE))
1311 {
1312 Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
1313 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1314 return VINF_EM_TERMINATE;
1315 }
1316
1317 /*
1318 * Debugger Facility polling.
1319 */
1320 if (VM_FF_ISPENDING(pVM, VM_FF_DBGF))
1321 {
1322 rc2 = DBGFR3VMMForcedAction(pVM);
1323 UPDATE_RC();
1324 }
1325
1326 /*
1327 * Postponed reset request.
1328 */
1329 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET))
1330 {
1331 rc2 = VMR3Reset(pVM);
1332 UPDATE_RC();
1333 }
1334
1335 /*
1336 * CSAM page scanning.
1337 */
1338 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1339 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1340 {
1341 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1342
1343            /** @todo check for 16 or 32 bit code! (D bit in the code selector) */
1344 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1345
1346 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
1347 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1348 }
1349
1350 /*
1351 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1352 */
1353 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1354 {
1355 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1356 UPDATE_RC();
1357 if (rc == VINF_EM_NO_MEMORY)
1358 return rc;
1359 }
1360
1361 /* check that we got them all */
1362 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1363 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_CSAM_SCAN_PAGE);
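        /* Illustrative note: the AssertCompile statements above turn a
         * forgotten handler into a build break: if someone adds a flag to one
         * of the masks without handling it here, compilation fails instead of
         * the flag being silently ignored. A minimal sketch of the technique
         * (the real IPRT macro differs in detail):
         *
         *     #define MY_ASSERT_COMPILE(expr) \
         *         typedef char MyAssertCompile[(expr) ? 1 : -1]
         */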
1364 }
1365
1366 /*
1367 * Normal priority then.
1368 * (Executed in no particular order.)
1369 */
1370 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1371 {
1372 /*
1373 * PDM Queues are pending.
1374 */
1375 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1376 PDMR3QueueFlushAll(pVM);
1377
1378 /*
1379 * PDM DMA transfers are pending.
1380 */
1381 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1382 PDMR3DmaRun(pVM);
1383
1384 /*
1385 * EMT Rendezvous (make sure they are handled before the requests).
1386 */
1387 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1388 {
1389 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1390 UPDATE_RC();
1391            /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1392 * stopped/reset before the next VM state change is made. We need a better
1393 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1394 * && rc >= VINF_EM_SUSPEND). */
1395 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1396 {
1397 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1398 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1399 return rc;
1400 }
1401 }
1402
1403 /*
1404 * Requests from other threads.
1405 */
1406 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1407 {
1408 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY);
1409 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1410 {
1411 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1412 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1413 return rc2;
1414 }
1415 UPDATE_RC();
1416            /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1417 * stopped/reset before the next VM state change is made. We need a better
1418 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1419 * && rc >= VINF_EM_SUSPEND). */
1420 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1421 {
1422 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1423 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1424 return rc;
1425 }
1426 }
1427
1428 /* Replay the handler notification changes. */
1429 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1430 {
1431 /* Try not to cause deadlocks. */
1432 if ( pVM->cCpus == 1
1433 || ( !PGMIsLockOwner(pVM)
1434 && !IOMIsLockOwner(pVM))
1435 )
1436 {
1437 EMRemLock(pVM);
1438 REMR3ReplayHandlerNotifications(pVM);
1439 EMRemUnlock(pVM);
1440 }
1441 }
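        /* Illustrative note: taking the REM lock here while holding the PGM or
         * IOM lock could deadlock against another EMT acquiring the locks in
         * the opposite order, so on SMP the replay is simply deferred until
         * this force-action is next serviced with no conflicting locks held. */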
1442
1443 /* check that we got them all */
1444 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1445 }
1446
1447 /*
1448 * Normal priority then. (per-VCPU)
1449 * (Executed in no particular order.)
1450 */
1451 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1452 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1453 {
1454 /*
1455 * Requests from other threads.
1456 */
1457 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
1458 {
1459 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu);
1460 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1461 {
1462 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1463 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1464 return rc2;
1465 }
1466 UPDATE_RC();
1467            /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1468 * stopped/reset before the next VM state change is made. We need a better
1469 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1470 * && rc >= VINF_EM_SUSPEND). */
1471 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1472 {
1473 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1474 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1475 return rc;
1476 }
1477 }
1478
1479 /* check that we got them all */
1480 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST)));
1481 }
1482
1483 /*
1484 * High priority pre execution chunk last.
1485 * (Executed in ascending priority order.)
1486 */
1487 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1488 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1489 {
1490 /*
1491 * Timers before interrupts.
1492 */
1493 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)
1494 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1495 TMR3TimerQueuesDo(pVM);
1496
1497 /*
1498 * The instruction following an emulated STI should *always* be executed!
1499 */
1500 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1501 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1502 {
1503 Log(("VM_FF_EMULATED_STI at %RGv successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1504 if (CPUMGetGuestEIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1505 {
1506 /* Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if the eip is the same as the inhibited instr address.
1507 * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
1508 * force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
1509             * break the guest. Sounds very unlikely, but such timing-sensitive problems are not as rare as you might think.
1510 */
1511 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1512 }
1513 if (HWACCMR3IsActive(pVCpu))
1514 rc2 = VINF_EM_RESCHEDULE_HWACC;
1515 else
1516 rc2 = PATMAreInterruptsEnabled(pVM) ? VINF_EM_RESCHEDULE_RAW : VINF_EM_RESCHEDULE_REM;
1517
1518 UPDATE_RC();
1519 }
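        /* Illustrative note: the inhibition being tracked here is the x86
         * STI/MOV SS one-instruction interrupt shadow, e.g. guest code like
         *
         *     sti     ; interrupts remain masked for one more instruction
         *     hlt     ; must execute before any interrupt is injected
         *
         * which is why the successor instruction must always run first. */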
1520
1521 /*
1522 * Interrupts.
1523 */
1524 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1525 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1526 && (!rc || rc >= VINF_EM_RESCHEDULE_HWACC)
1527 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
1528 && PATMAreInterruptsEnabled(pVM)
1529 && !HWACCMR3IsEventPending(pVCpu))
1530 {
1531 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1532 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1533 {
1534 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1535 /** @todo this really isn't nice, should properly handle this */
1536 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1537#ifdef VBOX_STRICT
1538 rcIrq = rc2;
1539#endif
1540 UPDATE_RC();
1541 }
1542 /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
1543 else if (REMR3QueryPendingInterrupt(pVM, pVCpu) != REM_NO_PENDING_IRQ)
1544 {
1545 rc2 = VINF_EM_RESCHEDULE_REM;
1546 UPDATE_RC();
1547 }
1548 }
1549
1550 /*
1551 * Allocate handy pages.
1552 */
1553 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1554 {
1555 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1556 UPDATE_RC();
1557 }
1558
1559 /*
1560 * Debugger Facility request.
1561 */
1562 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_DBGF, VM_FF_PGM_NO_MEMORY))
1563 {
1564 rc2 = DBGFR3VMMForcedAction(pVM);
1565 UPDATE_RC();
1566 }
1567
1568 /*
1569 * EMT Rendezvous (must be serviced before termination).
1570 */
1571 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1572 {
1573 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1574 UPDATE_RC();
1575            /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1576 * stopped/reset before the next VM state change is made. We need a better
1577 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1578 * && rc >= VINF_EM_SUSPEND). */
1579 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1580 {
1581 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1582 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1583 return rc;
1584 }
1585 }
1586
1587 /*
1588 * Termination request.
1589 */
1590 if (VM_FF_ISPENDING(pVM, VM_FF_TERMINATE))
1591 {
1592 Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
1593 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1594 return VINF_EM_TERMINATE;
1595 }
1596
1597 /*
1598 * Out of memory? Since most of our fellow high priority actions may cause us
1599 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
1600 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
1601 * than us since we can terminate without allocating more memory.
1602 */
1603 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1604 {
1605 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1606 UPDATE_RC();
1607 if (rc == VINF_EM_NO_MEMORY)
1608 return rc;
1609 }
1610
1611 /*
1612 * If the virtual sync clock is still stopped, make TM restart it.
1613 */
1614 if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
1615 TMR3VirtualSyncFF(pVM, pVCpu);
1616
1617#ifdef DEBUG
1618 /*
1619 * Debug, pause the VM.
1620 */
1621 if (VM_FF_ISPENDING(pVM, VM_FF_DEBUG_SUSPEND))
1622 {
1623 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
1624 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
1625 return VINF_EM_SUSPEND;
1626 }
1627#endif
1628
1629 /* check that we got them all */
1630 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1631 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS));
1632 }
1633
1634#undef UPDATE_RC
1635 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1636 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1637 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
1638 return rc;
1639}
1640
1641/**
1642 * Releases the REM lock if owned by the current VCPU.
1643 *
1644 * @param pVM The VM to operate on.
1645 */
1646VMMR3DECL(void) EMR3ReleaseOwnedLocks(PVM pVM)
1647{
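    /* The critical section may have been entered recursively; keep leaving
     * until this EMT no longer owns it. */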
1648 while (PDMCritSectIsOwner(&pVM->em.s.CritSectREM))
1649 PDMCritSectLeave(&pVM->em.s.CritSectREM);
1650}
1651
1652
1653/**
1654 * Execute VM.
1655 *
1656 * This function is the main loop of the VM. The emulation thread
1657 * calls this function when the VM has been successfully constructed
1658 * and we're ready to execute the VM.
1659 *
1660 * Returning from this function means that the VM is turned off or
1661 * suspended (state already saved) and deconstruction is next in line.
1662 *
1663 * All interaction from other threads is done using forced actions
1664 * and signaling of the wait object.
1665 *
1666 * @returns VBox status code; informational status codes may indicate failure.
1667 * @param pVM The VM to operate on.
1668 * @param pVCpu The VMCPU to operate on.
1669 */
1670VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
1671{
1672 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
1673 pVM,
1674 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
1675 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
1676 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
1677 pVCpu->em.s.fForceRAW));
1678 VM_ASSERT_EMT(pVM);
1679 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
1680 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
1681 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
1682 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
1683
1684 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
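    /* Fatal errors deep in the call stack longjmp back to this point with a
     * status code; setjmp itself returns 0 on the initial, direct call.
     * A minimal sketch of the pattern (illustrative, assuming <setjmp.h>):
     *
     *     jmp_buf Env;
     *     int rc = setjmp(Env);  // 0 first time, rcFatal after a longjmp
     *     if (rc == 0)
     *         run();             // may call longjmp(Env, rcFatal) deep down
     */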
1685 if (rc == 0)
1686 {
1687 /*
1688 * Start the virtual time.
1689 */
1690 TMR3NotifyResume(pVM, pVCpu);
1691
1692 /*
1693 * The Outer Main Loop.
1694 */
1695 bool fFFDone = false;
1696
1697 /* Reschedule right away to start in the right state. */
1698 rc = VINF_SUCCESS;
1699
1700 /* If resuming after a pause or a state load, restore the previous
1701 state or else we'll start executing code. Else, just reschedule. */
1702 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
1703 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
1704 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
1705 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
1706 else
1707 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1708
1709 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
1710 for (;;)
1711 {
1712 /*
1713 * Before we can schedule anything (we're here because
1714 * scheduling is required) we must service any pending
1715 * forced actions to avoid any pending action causing
1716             * immediate rescheduling upon entering an inner loop.
1717 *
1718 * Do forced actions.
1719 */
1720 if ( !fFFDone
1721 && rc != VINF_EM_TERMINATE
1722 && rc != VINF_EM_OFF
1723 && ( VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK)
1724 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_BUT_RAW_MASK)))
1725 {
1726 rc = emR3ForcedActions(pVM, pVCpu, rc);
1727 if ( ( rc == VINF_EM_RESCHEDULE_REM
1728 || rc == VINF_EM_RESCHEDULE_HWACC)
1729 && pVCpu->em.s.fForceRAW)
1730 rc = VINF_EM_RESCHEDULE_RAW;
1731 }
1732 else if (fFFDone)
1733 fFFDone = false;
1734
1735 /*
1736 * Now what to do?
1737 */
1738 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
1739 switch (rc)
1740 {
1741 /*
1742 * Keep doing what we're currently doing.
1743 */
1744 case VINF_SUCCESS:
1745 break;
1746
1747 /*
1748 * Reschedule - to raw-mode execution.
1749 */
1750 case VINF_EM_RESCHEDULE_RAW:
1751 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", pVCpu->em.s.enmState, EMSTATE_RAW));
1752 pVCpu->em.s.enmState = EMSTATE_RAW;
1753 break;
1754
1755 /*
1756 * Reschedule - to hardware accelerated raw-mode execution.
1757 */
1758 case VINF_EM_RESCHEDULE_HWACC:
1759 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HWACC: %d -> %d (EMSTATE_HWACC)\n", pVCpu->em.s.enmState, EMSTATE_HWACC));
1760 Assert(!pVCpu->em.s.fForceRAW);
1761 pVCpu->em.s.enmState = EMSTATE_HWACC;
1762 break;
1763
1764 /*
1765 * Reschedule - to recompiled execution.
1766 */
1767 case VINF_EM_RESCHEDULE_REM:
1768 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", pVCpu->em.s.enmState, EMSTATE_REM));
1769 pVCpu->em.s.enmState = EMSTATE_REM;
1770 break;
1771
1772#ifdef VBOX_WITH_VMI
1773 /*
1774 * Reschedule - parav call.
1775 */
1776 case VINF_EM_RESCHEDULE_PARAV:
1777 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_PARAV: %d -> %d (EMSTATE_PARAV)\n", pVCpu->em.s.enmState, EMSTATE_PARAV));
1778 pVCpu->em.s.enmState = EMSTATE_PARAV;
1779 break;
1780#endif
1781
1782 /*
1783 * Resume.
1784 */
1785 case VINF_EM_RESUME:
1786 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", pVCpu->em.s.enmState));
1787 /* Don't reschedule in the halted or wait for SIPI case. */
1788 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
1789 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
1790 break;
1791 /* fall through and get scheduled. */
1792
1793 /*
1794 * Reschedule.
1795 */
1796 case VINF_EM_RESCHEDULE:
1797 {
1798 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1799 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", pVCpu->em.s.enmState, enmState, emR3GetStateName(enmState)));
1800 pVCpu->em.s.enmState = enmState;
1801 break;
1802 }
1803
1804 /*
1805 * Halted.
1806 */
1807 case VINF_EM_HALT:
1808 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_HALTED));
1809 pVCpu->em.s.enmState = EMSTATE_HALTED;
1810 break;
1811
1812 /*
1813 * Switch to the wait for SIPI state (application processor only)
1814 */
1815 case VINF_EM_WAIT_SIPI:
1816 Assert(pVCpu->idCpu != 0);
1817 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_WAIT_SIPI));
1818 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
1819 break;
1820
1821
1822 /*
1823 * Suspend.
1824 */
1825 case VINF_EM_SUSPEND:
1826 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_SUSPENDED));
1827 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1828 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
1829 break;
1830
1831 /*
1832 * Reset.
1833             * We might end up doing a double reset for now; we'll have to clean up the mess later.
1834 */
1835 case VINF_EM_RESET:
1836 {
1837 if (pVCpu->idCpu == 0)
1838 {
1839 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1840 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", pVCpu->em.s.enmState, enmState, emR3GetStateName(enmState)));
1841 pVCpu->em.s.enmState = enmState;
1842 }
1843 else
1844 {
1845 /* All other VCPUs go into the wait for SIPI state. */
1846 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
1847 }
1848 break;
1849 }
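            /* Illustrative note: this mirrors the x86 MP startup protocol:
             * after a reset only the bootstrap processor (CPU 0) executes,
             * while the application processors sit in wait-for-SIPI until the
             * BSP wakes them with INIT/SIPI IPIs via the local APIC. */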
1850
1851 /*
1852 * Power Off.
1853 */
1854            case VINF_EM_OFF:
1855                Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", pVCpu->em.s.enmState, EMSTATE_TERMINATING));
1856                pVCpu->em.s.enmState = EMSTATE_TERMINATING;
1857 TMR3NotifySuspend(pVM, pVCpu);
1858 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
1859 return rc;
1860
1861 /*
1862 * Terminate the VM.
1863 */
1864            case VINF_EM_TERMINATE:
1865                Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", pVCpu->em.s.enmState, EMSTATE_TERMINATING));
1866                pVCpu->em.s.enmState = EMSTATE_TERMINATING;
1867 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
1868 TMR3NotifySuspend(pVM, pVCpu);
1869 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
1870 return rc;
1871
1872
1873 /*
1874 * Out of memory, suspend the VM and stuff.
1875 */
1876 case VINF_EM_NO_MEMORY:
1877 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_SUSPENDED));
1878 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1879 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
1880 TMR3NotifySuspend(pVM, pVCpu);
1881 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
1882
1883 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
1884 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
1885 if (rc != VINF_EM_SUSPEND)
1886 {
1887 if (RT_SUCCESS_NP(rc))
1888 {
1889 AssertLogRelMsgFailed(("%Rrc\n", rc));
1890 rc = VERR_EM_INTERNAL_ERROR;
1891 }
1892 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
1893 }
1894 return rc;
1895
1896 /*
1897 * Guest debug events.
1898 */
1899 case VINF_EM_DBG_STEPPED:
1900 AssertMsgFailed(("VINF_EM_DBG_STEPPED cannot be here!"));
1901 case VINF_EM_DBG_STOP:
1902 case VINF_EM_DBG_BREAKPOINT:
1903 case VINF_EM_DBG_STEP:
1904 if (pVCpu->em.s.enmState == EMSTATE_RAW)
1905 {
1906 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_GUEST_RAW));
1907 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
1908 }
1909 else
1910 {
1911 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_GUEST_REM));
1912 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1913 }
1914 break;
1915
1916 /*
1917 * Hypervisor debug events.
1918 */
1919 case VINF_EM_DBG_HYPER_STEPPED:
1920 case VINF_EM_DBG_HYPER_BREAKPOINT:
1921 case VINF_EM_DBG_HYPER_ASSERTION:
1922 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_HYPER));
1923 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
1924 break;
1925
1926 /*
1927             * Guru meditations.
1928 */
1929 case VERR_VMM_RING0_ASSERTION:
1930 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, pVCpu->em.s.enmState, EMSTATE_GURU_MEDITATION));
1931 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
1932 break;
1933
1934 /*
1935 * Any error code showing up here other than the ones we
1936             * know and process above is considered to be FATAL.
1937 *
1938 * Unknown warnings and informational status codes are also
1939 * included in this.
1940 */
1941 default:
1942 if (RT_SUCCESS_NP(rc))
1943 {
1944 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
1945 rc = VERR_EM_INTERNAL_ERROR;
1946 }
1947 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, pVCpu->em.s.enmState, EMSTATE_GURU_MEDITATION));
1948 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
1949 break;
1950 }
1951
1952 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
1953 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
1954
1955 /*
1956 * Act on the state.
1957 */
1958 switch (pVCpu->em.s.enmState)
1959 {
1960 /*
1961 * Execute raw.
1962 */
1963 case EMSTATE_RAW:
1964 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
1965 break;
1966
1967 /*
1968 * Execute hardware accelerated raw.
1969 */
1970 case EMSTATE_HWACC:
1971 rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone);
1972 break;
1973
1974 /*
1975 * Execute recompiled.
1976 */
1977 case EMSTATE_REM:
1978 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
1979 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
1980 break;
1981
1982#ifdef VBOX_WITH_VMI
1983 /*
1984 * Execute PARAV function.
1985 */
1986 case EMSTATE_PARAV:
1987 rc = PARAVCallFunction(pVM);
1988 pVCpu->em.s.enmState = EMSTATE_REM;
1989 break;
1990#endif
1991
1992 /*
1993 * Application processor execution halted until SIPI.
1994 */
1995 case EMSTATE_WAIT_SIPI:
1996 /* no break */
1997 /*
1998 * hlt - execution halted until interrupt.
1999 */
2000 case EMSTATE_HALTED:
2001 {
2002 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2003 if (pVCpu->em.s.mwait.fWait & EMMWAIT_FLAG_ACTIVE)
2004 {
2005 /* mwait has a special extension where it's woken up when an interrupt is pending even when IF=0. */
2006 rc = VMR3WaitHalted(pVM, pVCpu, !(pVCpu->em.s.mwait.fWait & EMMWAIT_FLAG_BREAKIRQIF0) && !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2007 pVCpu->em.s.mwait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2008 }
2009 else
2010 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2011
2012 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2013 break;
2014 }
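            /* Illustrative note: the MWAIT extension referred to above is the
             * MONITOR/MWAIT break-on-interrupt option (ECX bit 0 at MWAIT
             * time), which resumes execution on a pending interrupt even while
             * RFLAGS.IF is clear; hence the EMMWAIT_FLAG_BREAKIRQIF0 handling. */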
2015
2016 /*
2017 * Suspended - return to VM.cpp.
2018 */
2019 case EMSTATE_SUSPENDED:
2020 TMR3NotifySuspend(pVM, pVCpu);
2021 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2022 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2023 return VINF_EM_SUSPEND;
2024
2025 /*
2026 * Debugging in the guest.
2027 */
2028 case EMSTATE_DEBUG_GUEST_REM:
2029 case EMSTATE_DEBUG_GUEST_RAW:
2030 TMR3NotifySuspend(pVM, pVCpu);
2031 rc = emR3Debug(pVM, pVCpu, rc);
2032 TMR3NotifyResume(pVM, pVCpu);
2033                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2034 break;
2035
2036 /*
2037 * Debugging in the hypervisor.
2038 */
2039 case EMSTATE_DEBUG_HYPER:
2040 {
2041 TMR3NotifySuspend(pVM, pVCpu);
2042 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2043
2044 rc = emR3Debug(pVM, pVCpu, rc);
2045                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2046 if (rc != VINF_SUCCESS)
2047 {
2048 /* switch to guru meditation mode */
2049 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2050 VMMR3FatalDump(pVM, pVCpu, rc);
2051 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2052 return rc;
2053 }
2054
2055 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2056 TMR3NotifyResume(pVM, pVCpu);
2057 break;
2058 }
2059
2060 /*
2061 * Guru meditation takes place in the debugger.
2062 */
2063 case EMSTATE_GURU_MEDITATION:
2064 {
2065 TMR3NotifySuspend(pVM, pVCpu);
2066 VMMR3FatalDump(pVM, pVCpu, rc);
2067 emR3Debug(pVM, pVCpu, rc);
2068 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2069 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2070 return rc;
2071 }
2072
2073 /*
2074 * The states we don't expect here.
2075 */
2076 case EMSTATE_NONE:
2077 case EMSTATE_TERMINATING:
2078 default:
2079 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2080 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2081 TMR3NotifySuspend(pVM, pVCpu);
2082 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2083 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2084 return VERR_EM_INTERNAL_ERROR;
2085 }
2086 } /* The Outer Main Loop */
2087 }
2088 else
2089 {
2090 /*
2091 * Fatal error.
2092 */
2093 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2094 TMR3NotifySuspend(pVM, pVCpu);
2095 VMMR3FatalDump(pVM, pVCpu, rc);
2096 emR3Debug(pVM, pVCpu, rc);
2097 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2098 /** @todo change the VM state! */
2099 return rc;
2100 }
2101
2102 /* (won't ever get here). */
2103 AssertFailed();
2104}
2105