VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@47619

Last change on this file since 47619 was 47619, checked in by vboxsync, 12 years ago

EM: Started on HM single stepping for IEM verification purposes. Trying to fix the HM debugging in the process. VT-x only atm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 116.2 KB
/* $Id: EM.cpp 47619 2013-08-08 19:06:45Z vboxsync $ */
/** @file
 * EM - Execution Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_em     EM - The Execution Monitor / Manager
 *
 * The Execution Monitor/Manager is responsible for running the VM, scheduling
 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
 * Interpreted), and keeping the CPU states in sync. The function
 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
 * emR3RemExecute).
 *
 * The interpreted execution is only used to avoid switching between
 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
 * The interpretation is thus implemented as part of EM.
 *
 * @see grp_em
 */
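
/*
 * For orientation, a much-simplified sketch of the dispatch described above.
 * This is illustrative only and NOT the actual EMR3ExecuteVM() control flow;
 * the EMSTATE values and emR3RemExecute() are real, everything else here is
 * hand-waved:
 *
 * @code
 *    for (;;)
 *    {
 *        bool fFFDone = false;
 *        switch (pVCpu->em.s.enmState)
 *        {
 *            case EMSTATE_REM:   // recompiled / interpreted execution
 *                rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
 *                break;
 *            // EMSTATE_RAW, EMSTATE_HM, etc. dispatch to their own inner loops.
 *            default:
 *                return rc;
 *        }
 *        // Forced actions (FFs) are serviced between inner-loop runs.
 *    }
 * @endcode
 */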

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#include <VBox/vmm/em.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmqueue.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/patm.h>
#ifdef IEM_VERIFICATION_MODE
# include <VBox/vmm/iem.h>
#endif
#include "EMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/vmm/dbgf.h>
#include "VMMTracing.h"

#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/stream.h>
#include <iprt/thread.h>


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
#define EM_NOTIFY_HM
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
static const char *emR3GetStateName(EMSTATE enmState);
#endif
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);


/**
 * Initializes the EM.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 */
VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
{
    LogFlow(("EMR3Init\n"));
    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, em.s, 32);
    AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
    AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));

    /*
     * Init the structure.
     */
    pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
    PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
    PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");

    bool fEnabled;
    int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileUser = !fEnabled;

    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileSupervisor = !fEnabled;

#ifdef VBOX_WITH_RAW_RING1
    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
    AssertLogRelRCReturn(rc, rc);
#else
    pVM->fRawRing1Enabled = false; /* Disabled by default. */
#endif

    rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
    AssertLogRelRCReturn(rc, rc);

    Log(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool\n",
         pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll));

#ifdef VBOX_WITH_REM
    /*
     * Initialize the REM critical section.
     */
    AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
    rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
    AssertRCReturn(rc, rc);
#endif

    /*
     * Saved state.
     */
    rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
                               NULL, NULL, NULL,
                               NULL, emR3Save, NULL,
                               NULL, emR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;
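    /* Reader's note (assumption based on the SSMR3RegisterInternal parameter
       layout): the three NULL triples above are the live-save, save and load
       callback sets; EM only provides the save and load exec callbacks,
       emR3Save and emR3Load below. */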

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->em.s.offVMCPU = RT_OFFSETOF(VMCPU, em.s);

        pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
        pVCpu->em.s.enmPrevState = EMSTATE_NONE;
        pVCpu->em.s.fForceRAW = false;

        pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
#ifdef VBOX_WITH_RAW_MODE
        if (!HMIsEnabled(pVM))
        {
            pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
            AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
        }
#endif

        /* Force reset of the time slice. */
        pVCpu->em.s.u64TimeSliceStart = 0;

# define EM_REG_COUNTER(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_COUNTER_USED(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE_ADV(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);
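/* For illustration, the first registration below,
       EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
   expands to
       rc = STAMR3RegisterF(pVM, &pStats->StatRZEmulate, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS,
                            STAMUNIT_TICKS_PER_CALL, "Profiling of EMInterpretInstruction.",
                            "/EM/CPU%d/RZ/Interpret", i);
       AssertRC(rc);
   i.e. the description (c) and the name format string (b) trade places in the
   actual STAMR3RegisterF() call, and the CPU id (i) fills in the %d. */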

        /*
         * Statistics.
         */
#ifdef VBOX_WITH_STATISTICS
        PEMSTATS pStats;
        rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->em.s.pStatsR3 = pStats;
        pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
        pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);

        EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
        EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");

        EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
        EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");

        EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
        EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of prefix.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of prefix.");

        EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
# ifdef VBOX_WITH_FIRST_IEM_STEP
        EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed over to IEM in ring-3.");
# else
        EM_REG_COUNTER_USED(&pStats->StatIn, "/EM/CPU%d/R3/PrivInst/In", "Number of in instructions.");
        EM_REG_COUNTER_USED(&pStats->StatOut, "/EM/CPU%d/R3/PrivInst/Out", "Number of out instructions.");
# endif
        EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
        EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
        EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");

        EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
        pVCpu->em.s.pCliStatTree = 0;

        /* these should be considered for release statistics. */
        EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
        EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHmEntry, "/PROF/CPU%d/EM/HmEnter", "Profiling Hardware Accelerated Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHmExec, "/PROF/CPU%d/EM/HmExec", "Profiling Hardware Accelerated Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");

#endif /* VBOX_WITH_STATISTICS */

        EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
        EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
        EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");

        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
    }

    emR3InitDbg(pVM);
    return VINF_SUCCESS;
}


/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM         Pointer to the VM.
 */
VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
{
    LogFlow(("EMR3Relocate\n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        if (pVCpu->em.s.pStatsR3)
            pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
    }
}


/**
 * Reset the EM state for a CPU.
 *
 * Called by EMR3Reset and hot plugging.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
{
    pVCpu->em.s.fForceRAW = false;

    /* VMR3Reset may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
       out of the HALTED state here so that enmPrevState doesn't end up as
       HALTED when EMR3Execute returns. */
    if (pVCpu->em.s.enmState == EMSTATE_HALTED)
    {
        Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
        pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
    }
}


/**
 * Reset notification.
 *
 * @param   pVM         Pointer to the VM.
 */
VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
{
    Log(("EMR3Reset: \n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        EMR3ResetCpu(&pVM->aCpus[i]);
}


/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM itself being at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 */
VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
{
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

#ifdef VBOX_WITH_REM
    PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
#endif
    return VINF_SUCCESS;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pSSM        SSM operation handle.
 */
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
        AssertRCReturn(rc, rc);

        Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
        Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
        AssertRCReturn(rc, rc);

        /* Save mwait state. */
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    /*
     * Validate version.
     */
    if (    uVersion > EM_SAVED_STATE_VERSION
        ||  uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
    {
        AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Load the saved state.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;
        AssertRCReturn(rc, rc);

        if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
        {
            AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
            rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
            AssertRCReturn(rc, rc);
            Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);

            pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
        }
        if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
        {
            /* Load mwait state. */
            rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
            AssertRCReturn(rc, rc);
        }

        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return VINF_SUCCESS;
}


/**
 * Argument packet for emR3SetExecutionPolicy.
 */
struct EMR3SETEXECPOLICYARGS
{
    EMEXECPOLICY    enmPolicy;
    bool            fEnforce;
};


/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
 */
static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /*
     * Only the first CPU changes the variables.
     */
    if (pVCpu->idCpu == 0)
    {
        struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
        switch (pArgs->enmPolicy)
        {
            case EMEXECPOLICY_RECOMPILE_RING0:
                pVM->fRecompileSupervisor = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_RECOMPILE_RING3:
                pVM->fRecompileUser = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_IEM_ALL:
                pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
                break;
            default:
                AssertFailedReturn(VERR_INVALID_PARAMETER);
        }
        Log(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
             pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
    }

    /*
     * Force rescheduling if in RAW, HM, IEM, or REM.
     */
    return    pVCpu->em.s.enmState == EMSTATE_RAW
           || pVCpu->em.s.enmState == EMSTATE_HM
           || pVCpu->em.s.enmState == EMSTATE_IEM
           || pVCpu->em.s.enmState == EMSTATE_REM
         ? VINF_EM_RESCHEDULE
         : VINF_SUCCESS;
}


/**
 * Changes an execution scheduling policy parameter.
 *
 * This is used to enable or disable raw-mode / hardware-virtualization
 * execution of user and supervisor code.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
 *
 * @param   pUVM            The user mode VM handle.
 * @param   enmPolicy       The scheduling policy to change.
 * @param   fEnforce        Whether to enforce the policy or not.
 */
VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);

    struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
    return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
}
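
/*
 * Usage sketch (illustrative only, assuming a valid pUVM handle): force all
 * guest code through IEM, e.g. when exercising the interpreter. The
 * rendezvous above makes every EMT observe the new policy:
 *
 * @code
 *    int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *    if (RT_FAILURE(rc))
 *        LogRel(("EMR3SetExecutionPolicy failed: %Rrc\n", rc));
 * @endcode
 */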


/**
 * Queries an execution scheduling policy parameter.
 *
 * @returns VBox status code.
 * @param   pUVM            The user mode VM handle.
 * @param   enmPolicy       The scheduling policy to query.
 * @param   pfEnforced      Where to return the current value.
 */
VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
{
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /* No need to bother EMTs with a query. */
    switch (enmPolicy)
    {
        case EMEXECPOLICY_RECOMPILE_RING0:
            *pfEnforced = pVM->fRecompileSupervisor;
            break;
        case EMEXECPOLICY_RECOMPILE_RING3:
            *pfEnforced = pVM->fRecompileUser;
            break;
        case EMEXECPOLICY_IEM_ALL:
            *pfEnforced = pVM->em.s.fIemExecutesAll;
            break;
        default:
            AssertFailedReturn(VERR_INTERNAL_ERROR_2);
    }

    return VINF_SUCCESS;
}
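
/*
 * Usage sketch (illustrative only): reading a policy back is cheap since no
 * EMT rendezvous is involved, as noted in the function above:
 *
 * @code
 *    bool fIemAll = false;
 *    int rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fIemAll);
 *    AssertRC(rc);
 * @endcode
 */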


/**
 * Raise a fatal error.
 *
 * Safely terminate the VM with full state report and stuff. This function
 * will naturally never return.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   rc          VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
    AssertReleaseMsgFailed(("longjmp returned!\n"));
}
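
/* Reader's note (assumption): the FatalLongJump buffer used above is expected
   to have been armed with setjmp by the EM run loop before guest execution
   started, which is what makes the longjmp safe here. */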


#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
/**
 * Gets the EM state name.
 *
 * @returns Pointer to a read-only state name.
 * @param   enmState    The state.
 */
static const char *emR3GetStateName(EMSTATE enmState)
{
    switch (enmState)
    {
        case EMSTATE_NONE:              return "EMSTATE_NONE";
        case EMSTATE_RAW:               return "EMSTATE_RAW";
        case EMSTATE_HM:                return "EMSTATE_HM";
        case EMSTATE_IEM:               return "EMSTATE_IEM";
        case EMSTATE_REM:               return "EMSTATE_REM";
        case EMSTATE_HALTED:            return "EMSTATE_HALTED";
        case EMSTATE_WAIT_SIPI:         return "EMSTATE_WAIT_SIPI";
        case EMSTATE_SUSPENDED:         return "EMSTATE_SUSPENDED";
        case EMSTATE_TERMINATING:       return "EMSTATE_TERMINATING";
        case EMSTATE_DEBUG_GUEST_RAW:   return "EMSTATE_DEBUG_GUEST_RAW";
        case EMSTATE_DEBUG_GUEST_HM:    return "EMSTATE_DEBUG_GUEST_HM";
        case EMSTATE_DEBUG_GUEST_IEM:   return "EMSTATE_DEBUG_GUEST_IEM";
        case EMSTATE_DEBUG_GUEST_REM:   return "EMSTATE_DEBUG_GUEST_REM";
        case EMSTATE_DEBUG_HYPER:       return "EMSTATE_DEBUG_HYPER";
        case EMSTATE_GURU_MEDITATION:   return "EMSTATE_GURU_MEDITATION";
        default:                        return "Unknown!";
    }
}
#endif /* LOG_ENABLED || VBOX_STRICT */


/**
 * Debug loop.
 *
 * @returns VBox status code for EM.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   rc          Current EM VBox status code.
 */
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
        const VBOXSTRICTRC rcLast = rc;

        /*
         * Debug related RC.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (   pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    || pVCpu->em.s.fForceRAW /* paranoia */)
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawStep(pVM, pVCpu);
#else
                    AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
#endif
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
                    rc = EMR3HmSingleInstruction(pVM, pVCpu);
#ifdef VBOX_WITH_REM
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
                    rc = emR3RemStep(pVM, pVCpu);
#endif
                else
                {
                    rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
                    if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
                        rc = VINF_EM_DBG_STEPPED;
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                RTLogFlush(NULL);
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special error codes! */
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
        }

        /*
         * Process the result.
         */
        do
        {
            switch (VBOXSTRICTRC_VAL(rc))
            {
                /*
                 * Continue the debugging loop.
                 */
                case VINF_EM_DBG_STEP:
                case VINF_EM_DBG_STOP:
                case VINF_EM_DBG_STEPPED:
                case VINF_EM_DBG_BREAKPOINT:
                case VINF_EM_DBG_HYPER_STEPPED:
                case VINF_EM_DBG_HYPER_BREAKPOINT:
                case VINF_EM_DBG_HYPER_ASSERTION:
                    break;

                /*
                 * Resuming execution (in some form) has to be done here if we got
                 * a hypervisor debug event.
                 */
                case VINF_SUCCESS:
                case VINF_EM_RESUME:
                case VINF_EM_SUSPEND:
                case VINF_EM_RESCHEDULE:
                case VINF_EM_RESCHEDULE_RAW:
                case VINF_EM_RESCHEDULE_REM:
                case VINF_EM_HALT:
                    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                    {
#ifdef VBOX_WITH_RAW_MODE
                        rc = emR3RawResumeHyper(pVM, pVCpu);
                        if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                            continue;
#else
                        AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
#endif
                    }
                    if (rc == VINF_SUCCESS)
                        rc = VINF_EM_RESCHEDULE;
                    return rc;

                /*
                 * The debugger isn't attached.
                 * We'll simply turn the thing off since that's the easiest thing to do.
                 */
                case VERR_DBGF_NOT_ATTACHED:
                    switch (VBOXSTRICTRC_VAL(rcLast))
                    {
                        case VINF_EM_DBG_HYPER_STEPPED:
                        case VINF_EM_DBG_HYPER_BREAKPOINT:
                        case VINF_EM_DBG_HYPER_ASSERTION:
                        case VERR_TRPM_PANIC:
                        case VERR_TRPM_DONT_PANIC:
                        case VERR_VMM_RING0_ASSERTION:
                        case VERR_VMM_HYPER_CR3_MISMATCH:
                        case VERR_VMM_RING3_CALL_DISABLED:
                            return rcLast;
                    }
                    return VINF_EM_OFF;

                /*
                 * Status codes terminating the VM in one or another sense.
                 */
                case VINF_EM_TERMINATE:
                case VINF_EM_OFF:
                case VINF_EM_RESET:
                case VINF_EM_NO_MEMORY:
                case VINF_EM_RAW_STALE_SELECTOR:
                case VINF_EM_RAW_IRET_TRAP:
                case VERR_TRPM_PANIC:
                case VERR_TRPM_DONT_PANIC:
                case VERR_IEM_INSTR_NOT_IMPLEMENTED:
                case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
                case VERR_VMM_RING0_ASSERTION:
                case VERR_VMM_HYPER_CR3_MISMATCH:
                case VERR_VMM_RING3_CALL_DISABLED:
                case VERR_INTERNAL_ERROR:
                case VERR_INTERNAL_ERROR_2:
                case VERR_INTERNAL_ERROR_3:
                case VERR_INTERNAL_ERROR_4:
                case VERR_INTERNAL_ERROR_5:
                case VERR_IPE_UNEXPECTED_STATUS:
                case VERR_IPE_UNEXPECTED_INFO_STATUS:
                case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                    return rc;

                /*
                 * The rest is unexpected, and will keep us here.
                 */
                default:
                    AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
                    break;
            }
        } while (false);
    } /* debug for ever */
}


/**
 * Steps recompiled code.
 *
 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
 *          VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 */
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
    Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

#ifdef VBOX_WITH_REM
    EMRemLock(pVM);

    /*
     * Switch to REM, step instruction, switch back.
     */
    int rc = REMR3State(pVM, pVCpu);
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);
        REMR3StateBack(pVM, pVCpu);
    }
    EMRemUnlock(pVM);

#else
    int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
#endif

    Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
    return rc;
}


/**
 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
 * critical section.
 *
 * @returns false - new fInREMState value.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 */
DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
{
#ifdef VBOX_WITH_REM
    STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
    REMR3StateBack(pVM, pVCpu);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);

    EMRemUnlock(pVM);
#endif
    return false;
}


/**
 * Executes recompiled code.
 *
 * This function contains the recompiler version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
 *          VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 *
 */
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
{
#ifdef LOG_ENABLED
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    uint32_t cpl = CPUMGetGuestCPL(pVCpu);

    if (pCtx->eflags.Bits.u1VM)
        Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
    else
        Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
#endif
    STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);

#if defined(VBOX_STRICT) && defined(DEBUG_bird)
    AssertMsg(   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
              || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)),  /** @todo @bugref{1419} - get flat address. */
              ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
#endif

    /*
     * Spin till we get a forced action which returns anything but VINF_SUCCESS
     * or the REM suggests raw-mode execution.
     */
    *pfFFDone = false;
#ifdef VBOX_WITH_REM
    bool fInREMState = false;
#endif
    int rc = VINF_SUCCESS;
    for (;;)
    {
#ifdef VBOX_WITH_REM
        /*
         * Lock REM and update the state if not already in sync.
         *
         * Note! Big lock, but you are not supposed to own any lock when
         *       coming in here.
         */
        if (!fInREMState)
        {
            EMRemLock(pVM);
            STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);

            /* Flush the recompiler translation blocks if the VCPU has changed,
               also force a full CPU state resync. */
            if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
            {
                REMFlushTBs(pVM);
                CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
            }
            pVM->em.s.idLastRemCpu = pVCpu->idCpu;

            rc = REMR3State(pVM, pVCpu);

            STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
            if (RT_FAILURE(rc))
                break;
            fInREMState = true;

            /*
             * We might have missed the raising of VMREQ, TIMER and some other
             * important FFs while we were busy switching the state. So, check again.
             */
            if (    VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
                ||  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
            {
                LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
                goto l_REMDoForcedActions;
            }
        }
#endif

        /*
         * Execute REM.
         */
        if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
        {
            STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
#ifdef VBOX_WITH_REM
            rc = REMR3Run(pVM, pVCpu);
#else
            rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu));
#endif
            STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
        }
        else
        {
            /* Give up this time slice; virtual time continues */
            STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
            RTThreadSleep(5);
            STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
            rc = VINF_SUCCESS;
        }

        /*
         * Deal with high priority post execution FFs before doing anything
         * else. Sync back the state and leave the lock to be on the safe side.
         */
        if (    VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            ||  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
        {
#ifdef VBOX_WITH_REM
            fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
#endif
            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
        }

        /*
         * Process the returned status code.
         */
        if (rc != VINF_SUCCESS)
        {
            if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
                break;
            if (rc != VINF_REM_INTERRUPED_FF)
            {
                /*
                 * Anything which is not known to us means an internal error
                 * and the termination of the VM!
                 */
                AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
                break;
            }
        }


1176 /*
1177 * Check and execute forced actions.
1178 *
1179 * Sync back the VM state and leave the lock before calling any of
1180 * these; you never know what's going to happen here.
1181 */
1182#ifdef VBOX_HIGH_RES_TIMERS_HACK
1183 TMTimerPollVoid(pVM, pVCpu);
1184#endif
1185 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1186 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1187 || VMCPU_FF_IS_PENDING(pVCpu,
1188 VMCPU_FF_ALL_REM_MASK
1189 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1190 {
1191l_REMDoForcedActions:
1192#ifdef VBOX_WITH_REM
1193 if (fInREMState)
1194 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1195#endif
1196 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1197 rc = emR3ForcedActions(pVM, pVCpu, rc);
1198 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1199 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1200 if ( rc != VINF_SUCCESS
1201 && rc != VINF_EM_RESCHEDULE_REM)
1202 {
1203 *pfFFDone = true;
1204 break;
1205 }
1206 }
1207
1208 } /* The Inner Loop, recompiled execution mode version. */
1209
1210
1211#ifdef VBOX_WITH_REM
1212 /*
1213 * Returning. Sync back the VM state if required.
1214 */
1215 if (fInREMState)
1216 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1217#endif
1218
1219 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1220 return rc;
1221}
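/*
 * Condensed control flow of emR3RemExecute() above (annotation only):
 *
 *   for (;;)
 *       if (!fInREMState)      -> EMRemLock, flush TBs on VCPU change, REMR3State
 *       if (exec cap allows)   -> REMR3Run / IEMExecLots, else RTThreadSleep(5)
 *       high-prio post FFs     -> sync back + emR3HighPriorityPostForcedActions
 *       rc in EM status range  -> break (handled by EMR3ExecuteVM)
 *       any REM-mask FFs       -> sync back + emR3ForcedActions; break unless
 *                                 VINF_SUCCESS or VINF_EM_RESCHEDULE_REM
 *   final sync back if still in REM state, then return rc.
 */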
1222
1223
1224#ifdef DEBUG
1225
1226int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1227{
1228 EMSTATE enmOldState = pVCpu->em.s.enmState;
1229
1230 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1231
1232 Log(("Single step BEGIN:\n"));
1233 for (uint32_t i = 0; i < cIterations; i++)
1234 {
1235 DBGFR3PrgStep(pVCpu);
1236 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1237 emR3RemStep(pVM, pVCpu);
1238 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1239 break;
1240 }
1241 Log(("Single step END:\n"));
1242 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1243 pVCpu->em.s.enmState = enmOldState;
1244 return VINF_EM_RESCHEDULE;
1245}
1246
1247#endif /* DEBUG */
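/* Minimal usage sketch for the DEBUG-only single-stepper above (assumption:
   called on the EMT while the VCPU is in EMSTATE_REM); annotation only: */
#if 0
    int rcStep = emR3SingleStepExecRem(pVM, pVCpu, 16 /* cIterations */);
    Assert(rcStep == VINF_EM_RESCHEDULE); /* the helper always asks for a reschedule */
#endif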
1248
1249
1250/**
1251 * Decides whether to execute RAW, HWACC, IEM or REM.
1252 *
1253 * @returns new EM state
1254 * @param pVM Pointer to the VM.
1255 * @param pVCpu Pointer to the VMCPU.
1256 * @param pCtx Pointer to the guest CPU context.
1257 */
1258EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1259{
1260#ifdef IEM_VERIFICATION_MODE
1261 return EMSTATE_REM;
1262#else
1263
1264 /*
1265 * When forcing raw-mode execution, things are simple.
1266 */
1267 if (pVCpu->em.s.fForceRAW)
1268 return EMSTATE_RAW;
1269
1270 /*
1271 * We stay in the wait for SIPI state unless explicitly told otherwise.
1272 */
1273 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1274 return EMSTATE_WAIT_SIPI;
1275
1276 /*
1277 * Execute everything in IEM?
1278 */
1279 if (pVM->em.s.fIemExecutesAll)
1280 return EMSTATE_IEM;
1281
1282 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1283 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1284 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1285
1286 X86EFLAGS EFlags = pCtx->eflags;
1287 if (HMIsEnabled(pVM))
1288 {
1289 /*
1290 * Hardware accelerated raw-mode:
1291 */
1292 if ( EMIsHwVirtExecutionEnabled(pVM)
1293 && HMR3CanExecuteGuest(pVM, pCtx))
1294 return EMSTATE_HM;
1295
1296 /*
1297 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1298 * turns off monitoring features essential for raw mode!
1299 */
1300 return EMSTATE_REM;
1301 }
1302
1303 /*
1304 * Standard raw-mode:
1305 *
1306 * Here we only support 16 and 32-bit protected mode ring-3 code that has no I/O privileges,
1307 * or 32-bit protected mode ring-0 code.
1308 *
1309 * The tests are ordered by the likelihood of being true during normal execution.
1310 */
1311 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1312 {
1313 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1314 return EMSTATE_REM;
1315 }
1316
1317# ifndef VBOX_RAW_V86
1318 if (EFlags.u32 & X86_EFL_VM) {
1319 Log2(("raw mode refused: VM_MASK\n"));
1320 return EMSTATE_REM;
1321 }
1322# endif
1323
1324 /** @todo check up the X86_CR0_AM flag with respect to raw mode!!! We're probably not emulating it right! */
1325 uint32_t u32CR0 = pCtx->cr0;
1326 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1327 {
1328 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1329 return EMSTATE_REM;
1330 }
1331
1332 if (pCtx->cr4 & X86_CR4_PAE)
1333 {
1334 uint32_t u32Dummy, u32Features;
1335
1336 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1337 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1338 return EMSTATE_REM;
1339 }
1340
1341 unsigned uSS = pCtx->ss.Sel;
1342 if ( pCtx->eflags.Bits.u1VM
1343 || (uSS & X86_SEL_RPL) == 3)
1344 {
1345 if (!EMIsRawRing3Enabled(pVM))
1346 return EMSTATE_REM;
1347
1348 if (!(EFlags.u32 & X86_EFL_IF))
1349 {
1350 Log2(("raw mode refused: IF (RawR3)\n"));
1351 return EMSTATE_REM;
1352 }
1353
1354 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1355 {
1356 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1357 return EMSTATE_REM;
1358 }
1359 }
1360 else
1361 {
1362 if (!EMIsRawRing0Enabled(pVM))
1363 return EMSTATE_REM;
1364
1365 if (EMIsRawRing1Enabled(pVM))
1366 {
1367 /* Only ring 0 and 1 supervisor code. */
1368 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1369 {
1370 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1371 return EMSTATE_REM;
1372 }
1373 }
1374 /* Only ring 0 supervisor code. */
1375 else if ((uSS & X86_SEL_RPL) != 0)
1376 {
1377 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1378 return EMSTATE_REM;
1379 }
1380
1381 // Let's start with pure 32-bit ring-0 code first
1382 /** @todo What's pure 32-bit mode? flat? */
1383 if ( !(pCtx->ss.Attr.n.u1DefBig)
1384 || !(pCtx->cs.Attr.n.u1DefBig))
1385 {
1386 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1387 return EMSTATE_REM;
1388 }
1389
1390 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1391 if (!(u32CR0 & X86_CR0_WP))
1392 {
1393 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1394 return EMSTATE_REM;
1395 }
1396
1397# ifdef VBOX_WITH_RAW_MODE
1398 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1399 {
1400 Log2(("raw r0 mode forced: patch code\n"));
1401# ifdef VBOX_WITH_SAFE_STR
1402 Assert(pCtx->tr.Sel);
1403# endif
1404 return EMSTATE_RAW;
1405 }
1406# endif /* VBOX_WITH_RAW_MODE */
1407
1408# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1409 if (!(EFlags.u32 & X86_EFL_IF))
1410 {
1411 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1412 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1413 return EMSTATE_REM;
1414 }
1415# endif
1416
1417# ifndef VBOX_WITH_RAW_RING1
1418 /** @todo still necessary??? */
1419 if (EFlags.Bits.u2IOPL != 0)
1420 {
1421 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1422 return EMSTATE_REM;
1423 }
1424# endif
1425 }
1426
1427 /*
1428 * Stale hidden selectors mean raw-mode is unsafe (we're being very careful here).
1429 */
1430 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1431 {
1432 Log2(("raw mode refused: stale CS\n"));
1433 return EMSTATE_REM;
1434 }
1435 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1436 {
1437 Log2(("raw mode refused: stale SS\n"));
1438 return EMSTATE_REM;
1439 }
1440 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1441 {
1442 Log2(("raw mode refused: stale DS\n"));
1443 return EMSTATE_REM;
1444 }
1445 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1446 {
1447 Log2(("raw mode refused: stale ES\n"));
1448 return EMSTATE_REM;
1449 }
1450 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1451 {
1452 Log2(("raw mode refused: stale FS\n"));
1453 return EMSTATE_REM;
1454 }
1455 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1456 {
1457 Log2(("raw mode refused: stale GS\n"));
1458 return EMSTATE_REM;
1459 }
1460
1461# ifdef VBOX_WITH_SAFE_STR
1462 if (pCtx->tr.Sel == 0)
1463 {
1464 Log(("Raw mode refused -> TR=0\n"));
1465 return EMSTATE_REM;
1466 }
1467# endif
1468
1469 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1470 return EMSTATE_RAW;
1471#endif /* !IEM_VERIFICATION_MODE */
1472
1473}
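/*
 * Decision summary for emR3Reschedule() above (annotation only):
 *
 *   fForceRAW                      -> EMSTATE_RAW
 *   currently in EMSTATE_WAIT_SIPI -> EMSTATE_WAIT_SIPI (sticky)
 *   fIemExecutesAll                -> EMSTATE_IEM
 *   HM enabled                     -> EMSTATE_HM if HMR3CanExecuteGuest, else EMSTATE_REM
 *   raw-mode checks all pass       -> EMSTATE_RAW; any single refusal -> EMSTATE_REM
 */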
1474
1475
1476/**
1477 * Executes all high priority post execution force actions.
1478 *
1479 * @returns rc or a fatal status code.
1480 *
1481 * @param pVM Pointer to the VM.
1482 * @param pVCpu Pointer to the VMCPU.
1483 * @param rc The current rc.
1484 */
1485int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1486{
1487 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1488
1489 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1490 PDMCritSectBothFF(pVCpu);
1491
1492 /* Update CR3 (Nested Paging case for HM). */
1493 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1494 {
1495 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1496 if (RT_FAILURE(rc2))
1497 return rc2;
1498 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1499 }
1500
1501 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1502 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1503 {
1504 if (CPUMIsGuestInPAEMode(pVCpu))
1505 {
1506 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1507 AssertPtr(pPdpes);
1508
1509 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1510 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1511 }
1512 else
1513 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1514 }
1515
1516#ifdef VBOX_WITH_RAW_MODE
1517 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1518 CSAMR3DoPendingAction(pVM, pVCpu);
1519#endif
1520
1521 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1522 {
1523 if ( rc > VINF_EM_NO_MEMORY
1524 && rc <= VINF_EM_LAST)
1525 rc = VINF_EM_NO_MEMORY;
1526 }
1527
1528 return rc;
1529}
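/*
 * Summary of the high-priority post-execution work above (annotation only):
 * pending PDM critical-section leaves, CR3 and PAE PDPE updates for the HM
 * nested-paging case, pending CSAM actions in raw mode, and demotion of less
 * important EM statuses to VINF_EM_NO_MEMORY when the VM has run out of memory.
 */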
1530
1531
1532/**
1533 * Executes all pending forced actions.
1534 *
1535 * Forced actions can cause execution delays and execution
1536 * rescheduling. The first we deal with using action priority, so
1537 * that for instance pending timers aren't scheduled and run until
1538 * right before execution. The rescheduling we deal with using
1539 * return codes. The same goes for VM termination, only in that case
1540 * we exit everything.
1541 *
1542 * @returns VBox status code of equal or greater importance/severity than rc.
1543 * The most important ones are: VINF_EM_RESCHEDULE,
1544 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1545 *
1546 * @param pVM Pointer to the VM.
1547 * @param pVCpu Pointer to the VMCPU.
1548 * @param rc The current rc.
1549 *
1550 */
1551int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1552{
1553 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1554#ifdef VBOX_STRICT
1555 int rcIrq = VINF_SUCCESS;
1556#endif
1557 int rc2;
1558#define UPDATE_RC() \
1559 do { \
1560 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1561 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1562 break; \
1563 if (!rc || rc2 < rc) \
1564 rc = rc2; \
1565 } while (0)
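/*
 * Worked example for UPDATE_RC (annotation only): EM informational statuses
 * become *less* important as the VINF_EM_* value grows, so the macro keeps
 * the numerically smallest pending status and never touches errors:
 *
 *   rc = VINF_SUCCESS,       rc2 = VINF_EM_RESCHEDULE -> rc = VINF_EM_RESCHEDULE
 *   rc = VINF_EM_RESCHEDULE, rc2 = VINF_EM_SUSPEND    -> rc = VINF_EM_SUSPEND
 *   rc = VINF_EM_SUSPEND,    rc2 = VINF_EM_RESCHEDULE -> rc stays VINF_EM_SUSPEND
 *   rc < VINF_SUCCESS (error)                         -> rc is left untouched
 */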
1566 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1567
1568 /*
1569 * Post execution chunk first.
1570 */
1571 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1572 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1573 {
1574 /*
1575 * EMT Rendezvous (must be serviced before termination).
1576 */
1577 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1578 {
1579 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1580 UPDATE_RC();
1581 /** @todo HACK ALERT! The following test is to make sure EM+TM
1582 * thinks the VM is stopped/reset before the next VM state change
1583 * is made. We need a better solution for this, or at least make it
1584 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1585 * VINF_EM_SUSPEND). */
1586 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1587 {
1588 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1589 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1590 return rc;
1591 }
1592 }
1593
1594 /*
1595 * State change request (cleared by vmR3SetStateLocked).
1596 */
1597 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1598 {
1599 VMSTATE enmState = VMR3GetState(pVM);
1600 switch (enmState)
1601 {
1602 case VMSTATE_FATAL_ERROR:
1603 case VMSTATE_FATAL_ERROR_LS:
1604 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1605 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1606 return VINF_EM_SUSPEND;
1607
1608 case VMSTATE_DESTROYING:
1609 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1610 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1611 return VINF_EM_TERMINATE;
1612
1613 default:
1614 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1615 }
1616 }
1617
1618 /*
1619 * Debugger Facility polling.
1620 */
1621 if (VM_FF_IS_PENDING(pVM, VM_FF_DBGF))
1622 {
1623 rc2 = DBGFR3VMMForcedAction(pVM);
1624 UPDATE_RC();
1625 }
1626
1627 /*
1628 * Postponed reset request.
1629 */
1630 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1631 {
1632 rc2 = VMR3Reset(pVM->pUVM);
1633 UPDATE_RC();
1634 }
1635
1636#ifdef VBOX_WITH_RAW_MODE
1637 /*
1638 * CSAM page scanning.
1639 */
1640 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1641 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1642 {
1643 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1644
1645 /** @todo: check for 16 or 32 bits code! (D bit in the code selector) */
1646 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1647
1648 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
1649 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1650 }
1651#endif
1652
1653 /*
1654 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1655 */
1656 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1657 {
1658 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1659 UPDATE_RC();
1660 if (rc == VINF_EM_NO_MEMORY)
1661 return rc;
1662 }
1663
1664 /* check that we got them all */
1665 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1666 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0));
1667 }
1668
1669 /*
1670 * Normal priority then.
1671 * (Executed in no particular order.)
1672 */
1673 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1674 {
1675 /*
1676 * PDM Queues are pending.
1677 */
1678 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1679 PDMR3QueueFlushAll(pVM);
1680
1681 /*
1682 * PDM DMA transfers are pending.
1683 */
1684 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1685 PDMR3DmaRun(pVM);
1686
1687 /*
1688 * EMT Rendezvous (make sure they are handled before the requests).
1689 */
1690 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1691 {
1692 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1693 UPDATE_RC();
1694 /** @todo HACK ALERT! The following test is to make sure EM+TM
1695 * thinks the VM is stopped/reset before the next VM state change
1696 * is made. We need a better solution for this, or at least make it
1697 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1698 * VINF_EM_SUSPEND). */
1699 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1700 {
1701 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1702 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1703 return rc;
1704 }
1705 }
1706
1707 /*
1708 * Requests from other threads.
1709 */
1710 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1711 {
1712 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1713 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1714 {
1715 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1716 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1717 return rc2;
1718 }
1719 UPDATE_RC();
1720 /** @todo HACK ALERT! The following test is to make sure EM+TM
1721 * thinks the VM is stopped/reset before the next VM state change
1722 * is made. We need a better solution for this, or at least make it
1723 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1724 * VINF_EM_SUSPEND). */
1725 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1726 {
1727 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1728 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1729 return rc;
1730 }
1731 }
1732
1733#ifdef VBOX_WITH_REM
1734 /* Replay the handler notification changes. */
1735 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1736 {
1737 /* Try not to cause deadlocks. */
1738 if ( pVM->cCpus == 1
1739 || ( !PGMIsLockOwner(pVM)
1740 && !IOMIsLockWriteOwner(pVM))
1741 )
1742 {
1743 EMRemLock(pVM);
1744 REMR3ReplayHandlerNotifications(pVM);
1745 EMRemUnlock(pVM);
1746 }
1747 }
1748#endif
1749
1750 /* check that we got them all */
1751 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1752 }
1753
1754 /*
1755 * Normal priority then. (per-VCPU)
1756 * (Executed in no particular order.)
1757 */
1758 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1759 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1760 {
1761 /*
1762 * Requests from other threads.
1763 */
1764 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
1765 {
1766 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1767 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1768 {
1769 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1770 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1771 return rc2;
1772 }
1773 UPDATE_RC();
1774 /** @todo HACK ALERT! The following test is to make sure EM+TM
1775 * thinks the VM is stopped/reset before the next VM state change
1776 * is made. We need a better solution for this, or at least make it
1777 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1778 * VINF_EM_SUSPEND). */
1779 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1780 {
1781 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1782 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1783 return rc;
1784 }
1785 }
1786
1787 /* check that we got them all */
1788 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST)));
1789 }
1790
1791 /*
1792 * High priority pre execution chunk last.
1793 * (Executed in ascending priority order.)
1794 */
1795 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1796 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1797 {
1798 /*
1799 * Timers before interrupts.
1800 */
1801 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
1802 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1803 TMR3TimerQueuesDo(pVM);
1804
1805 /*
1806 * The instruction following an emulated STI should *always* be executed!
1807 *
1808 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
1809 * the eip is the same as the inhibited instr address. Before we
1810 * are able to execute this instruction in raw mode (iret to
1811 * guest code) an external interrupt might force a world switch
1812 * again. Possibly allowing a guest interrupt to be dispatched
1813 * in the process. This could break the guest. Sounds very
1814 * unlikely, but such timing-sensitive problems are not as rare as
1815 * you might think.
1816 */
1817 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1818 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1819 {
1820 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1821 {
1822 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1823 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1824 }
1825 else
1826 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1827 }
1828
1829 /*
1830 * Interrupts.
1831 */
1832 bool fWakeupPending = false;
1833 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1834 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1835 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1836 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
1837#ifdef VBOX_WITH_RAW_MODE
1838 && PATMAreInterruptsEnabled(pVM)
1839#else
1840 && (pVCpu->em.s.pCtx->eflags.u32 & X86_EFL_IF)
1841#endif
1842 && !HMR3IsEventPending(pVCpu))
1843 {
1844 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1845 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1846 {
1847 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1848 /** @todo this really isn't nice, should properly handle this */
1849 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1850 if (pVM->em.s.fIemExecutesAll && (rc2 == VINF_EM_RESCHEDULE_REM || rc2 == VINF_EM_RESCHEDULE_HM || rc2 == VINF_EM_RESCHEDULE_RAW))
1851 rc2 = VINF_EM_RESCHEDULE;
1852#ifdef VBOX_STRICT
1853 rcIrq = rc2;
1854#endif
1855 UPDATE_RC();
1856 /* Reschedule required: We must not miss the wakeup below! */
1857 fWakeupPending = true;
1858 }
1859#ifdef VBOX_WITH_REM
1860 /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
1861 else if (REMR3QueryPendingInterrupt(pVM, pVCpu) != REM_NO_PENDING_IRQ)
1862 {
1863 Log2(("REMR3QueryPendingInterrupt -> %#x\n", REMR3QueryPendingInterrupt(pVM, pVCpu)));
1864 rc2 = VINF_EM_RESCHEDULE_REM;
1865 UPDATE_RC();
1866 }
1867#endif
1868 }
1869
1870 /*
1871 * Allocate handy pages.
1872 */
1873 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1874 {
1875 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1876 UPDATE_RC();
1877 }
1878
1879 /*
1880 * Debugger Facility request.
1881 */
1882 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_DBGF, VM_FF_PGM_NO_MEMORY))
1883 {
1884 rc2 = DBGFR3VMMForcedAction(pVM);
1885 UPDATE_RC();
1886 }
1887
1888 /*
1889 * EMT Rendezvous (must be serviced before termination).
1890 */
1891 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1892 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1893 {
1894 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1895 UPDATE_RC();
1896 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1897 * stopped/reset before the next VM state change is made. We need a better
1898 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1899 * && rc <= VINF_EM_SUSPEND). */
1900 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1901 {
1902 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1903 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1904 return rc;
1905 }
1906 }
1907
1908 /*
1909 * State change request (cleared by vmR3SetStateLocked).
1910 */
1911 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1912 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1913 {
1914 VMSTATE enmState = VMR3GetState(pVM);
1915 switch (enmState)
1916 {
1917 case VMSTATE_FATAL_ERROR:
1918 case VMSTATE_FATAL_ERROR_LS:
1919 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1920 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1921 return VINF_EM_SUSPEND;
1922
1923 case VMSTATE_DESTROYING:
1924 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1925 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1926 return VINF_EM_TERMINATE;
1927
1928 default:
1929 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1930 }
1931 }
1932
1933 /*
1934 * Out of memory? Since most of our fellow high priority actions may cause us
1935 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
1936 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
1937 * than us since we can terminate without allocating more memory.
1938 */
1939 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1940 {
1941 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1942 UPDATE_RC();
1943 if (rc == VINF_EM_NO_MEMORY)
1944 return rc;
1945 }
1946
1947 /*
1948 * If the virtual sync clock is still stopped, make TM restart it.
1949 */
1950 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
1951 TMR3VirtualSyncFF(pVM, pVCpu);
1952
1953#ifdef DEBUG
1954 /*
1955 * Debug, pause the VM.
1956 */
1957 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
1958 {
1959 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
1960 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
1961 return VINF_EM_SUSPEND;
1962 }
1963#endif
1964
1965 /* check that we got them all */
1966 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1967 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
1968 }
1969
1970#undef UPDATE_RC
1971 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1972 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1973 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
1974 return rc;
1975}
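/*
 * Processing order used by emR3ForcedActions() above (annotation only):
 *   1. normal-priority post FFs:  EMT rendezvous, VM state change, DBGF,
 *      postponed reset, CSAM page scan (raw mode), out-of-memory
 *   2. normal-priority VM FFs:    PDM queues, PDM DMA, EMT rendezvous,
 *      cross-thread requests, REM handler notifications
 *   3. normal-priority VCPU FFs:  per-VCPU requests
 *   4. high-priority pre FFs:     timers, STI interrupt inhibition, interrupt
 *      injection, handy-page allocation, DBGF, EMT rendezvous, VM state
 *      change, out-of-memory, virtual sync clock, debug suspend
 */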
1976
1977
1978/**
1979 * Check whether the preset execution time cap allows guest execution to be scheduled right now.
1980 *
1981 * @returns true if allowed, false otherwise
1982 * @param pVM Pointer to the VM.
1983 * @param pVCpu Pointer to the VMCPU.
1984 */
1985bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
1986{
1987 uint64_t u64UserTime, u64KernelTime;
1988
1989 if ( pVM->uCpuExecutionCap != 100
1990 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
1991 {
1992 uint64_t u64TimeNow = RTTimeMilliTS();
1993 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
1994 {
1995 /* New time slice. */
1996 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
1997 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
1998 pVCpu->em.s.u64TimeSliceExec = 0;
1999 }
2000 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2001
2002 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2003 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2004 return false;
2005 }
2006 return true;
2007}
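/*
 * Worked example (annotation only, assuming EM_TIME_SLICE is 100 ms): with
 * uCpuExecutionCap = 50, the per-slice budget is 100 * 50 / 100 = 50 ms of
 * combined kernel+user thread time. Once u64TimeSliceExec reaches 50 ms this
 * function returns false, so the execution loops idle (e.g. the 5 ms
 * RTThreadSleep in emR3RemExecute) until the next slice starts.
 */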
2008
2009
2010/**
2011 * Execute VM.
2012 *
2013 * This function is the main loop of the VM. The emulation thread
2014 * calls this function when the VM has been successfully constructed
2015 * and we're ready to execute the VM.
2016 *
2017 * Returning from this function means that the VM is turned off or
2018 * suspended (state already saved) and deconstruction is next in line.
2019 *
2020 * All interaction from other threads is done using forced actions
2021 * and signaling of the wait object.
2022 *
2023 * @returns VBox status code, informational status codes may indicate failure.
2024 * @param pVM Pointer to the VM.
2025 * @param pVCpu Pointer to the VMCPU.
2026 */
2027VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2028{
2029 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2030 pVM,
2031 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2032 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2033 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2034 pVCpu->em.s.fForceRAW));
2035 VM_ASSERT_EMT(pVM);
2036 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2037 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2038 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2039 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2040
2041 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2042 if (rc == 0)
2043 {
2044 /*
2045 * Start the virtual time.
2046 */
2047 TMR3NotifyResume(pVM, pVCpu);
2048
2049 /*
2050 * The Outer Main Loop.
2051 */
2052 bool fFFDone = false;
2053
2054 /* Reschedule right away to start in the right state. */
2055 rc = VINF_SUCCESS;
2056
2057 /* If resuming after a pause or a state load, restore the previous
2058 state or else we'll start executing code. Else, just reschedule. */
2059 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2060 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2061 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2062 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2063 else
2064 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2065 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2066
2067 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2068 for (;;)
2069 {
2070 /*
2071 * Before we can schedule anything (we're here because
2072 * scheduling is required) we must service any pending
2073 * forced actions to avoid any pending action causing
2074 * immediate rescheduling upon entering an inner loop.
2075 *
2076 * Do forced actions.
2077 */
2078 if ( !fFFDone
2079 && RT_SUCCESS(rc)
2080 && rc != VINF_EM_TERMINATE
2081 && rc != VINF_EM_OFF
2082 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2083 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK)))
2084 {
2085 rc = emR3ForcedActions(pVM, pVCpu, rc);
2086 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2087 if ( ( rc == VINF_EM_RESCHEDULE_REM
2088 || rc == VINF_EM_RESCHEDULE_HM)
2089 && pVCpu->em.s.fForceRAW)
2090 rc = VINF_EM_RESCHEDULE_RAW;
2091 }
2092 else if (fFFDone)
2093 fFFDone = false;
2094
2095 /*
2096 * Now what to do?
2097 */
2098 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2099 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2100 switch (rc)
2101 {
2102 /*
2103 * Keep doing what we're currently doing.
2104 */
2105 case VINF_SUCCESS:
2106 break;
2107
2108 /*
2109 * Reschedule - to raw-mode execution.
2110 */
2111 case VINF_EM_RESCHEDULE_RAW:
2112 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2113 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2114 pVCpu->em.s.enmState = EMSTATE_RAW;
2115 break;
2116
2117 /*
2118 * Reschedule - to hardware accelerated raw-mode execution.
2119 */
2120 case VINF_EM_RESCHEDULE_HM:
2121 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2122 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2123 Assert(!pVCpu->em.s.fForceRAW);
2124 pVCpu->em.s.enmState = EMSTATE_HM;
2125 break;
2126
2127 /*
2128 * Reschedule - to recompiled execution.
2129 */
2130 case VINF_EM_RESCHEDULE_REM:
2131 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2132 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2133 pVCpu->em.s.enmState = EMSTATE_REM;
2134 break;
2135
2136 /*
2137 * Resume.
2138 */
2139 case VINF_EM_RESUME:
2140 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2141 /* Don't reschedule in the halted or wait for SIPI case. */
2142 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2143 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2144 {
2145 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2146 break;
2147 }
2148 /* fall through and get scheduled. */
2149
2150 /*
2151 * Reschedule.
2152 */
2153 case VINF_EM_RESCHEDULE:
2154 {
2155 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2156 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2157 pVCpu->em.s.enmState = enmState;
2158 break;
2159 }
2160
2161 /*
2162 * Halted.
2163 */
2164 case VINF_EM_HALT:
2165 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2166 pVCpu->em.s.enmState = EMSTATE_HALTED;
2167 break;
2168
2169 /*
2170 * Switch to the wait for SIPI state (application processor only)
2171 */
2172 case VINF_EM_WAIT_SIPI:
2173 Assert(pVCpu->idCpu != 0);
2174 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2175 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2176 break;
2177
2178
2179 /*
2180 * Suspend.
2181 */
2182 case VINF_EM_SUSPEND:
2183 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2184 Assert(enmOldState != EMSTATE_SUSPENDED);
2185 pVCpu->em.s.enmPrevState = enmOldState;
2186 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2187 break;
2188
2189 /*
2190 * Reset.
2191 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2192 */
2193 case VINF_EM_RESET:
2194 {
2195 if (pVCpu->idCpu == 0)
2196 {
2197 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2198 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2199 pVCpu->em.s.enmState = enmState;
2200 }
2201 else
2202 {
2203 /* All other VCPUs go into the wait for SIPI state. */
2204 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2205 }
2206 break;
2207 }
2208
2209 /*
2210 * Power Off.
2211 */
2212 case VINF_EM_OFF:
2213 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2214 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2215 TMR3NotifySuspend(pVM, pVCpu);
2216 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2217 return rc;
2218
2219 /*
2220 * Terminate the VM.
2221 */
2222 case VINF_EM_TERMINATE:
2223 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2224 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2225 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2226 TMR3NotifySuspend(pVM, pVCpu);
2227 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2228 return rc;
2229
2230
2231 /*
2232 * Out of memory, suspend the VM and stuff.
2233 */
2234 case VINF_EM_NO_MEMORY:
2235 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2236 Assert(enmOldState != EMSTATE_SUSPENDED);
2237 pVCpu->em.s.enmPrevState = enmOldState;
2238 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2239 TMR3NotifySuspend(pVM, pVCpu);
2240 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2241
2242 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2243 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2244 if (rc != VINF_EM_SUSPEND)
2245 {
2246 if (RT_SUCCESS_NP(rc))
2247 {
2248 AssertLogRelMsgFailed(("%Rrc\n", rc));
2249 rc = VERR_EM_INTERNAL_ERROR;
2250 }
2251 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2252 }
2253 return rc;
2254
2255 /*
2256 * Guest debug events.
2257 */
2258 case VINF_EM_DBG_STEPPED:
2259 case VINF_EM_DBG_STOP:
2260 case VINF_EM_DBG_BREAKPOINT:
2261 case VINF_EM_DBG_STEP:
2262 if (enmOldState == EMSTATE_RAW)
2263 {
2264 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2265 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2266 }
2267 else if (enmOldState == EMSTATE_HM)
2268 {
2269 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2270 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2271 }
2272 else if (enmOldState == EMSTATE_REM)
2273 {
2274 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2275 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2276 }
2277 else
2278 {
2279 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2280 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2281 }
2282 break;
2283
2284 /*
2285 * Hypervisor debug events.
2286 */
2287 case VINF_EM_DBG_HYPER_STEPPED:
2288 case VINF_EM_DBG_HYPER_BREAKPOINT:
2289 case VINF_EM_DBG_HYPER_ASSERTION:
2290 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2291 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2292 break;
2293
2294 /*
2295 * Guru meditations.
2296 */
2297 case VERR_VMM_RING0_ASSERTION:
2298 case VINF_EM_TRIPLE_FAULT:
2299 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2300 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2301 break;
2302
2303 /*
2304 * Any error code showing up here other than the ones we
2305 * know and process above are considered to be FATAL.
2306 *
2307 * Unknown warnings and informational status codes are also
2308 * included in this.
2309 */
2310 default:
2311 if (RT_SUCCESS_NP(rc))
2312 {
2313 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2314 rc = VERR_EM_INTERNAL_ERROR;
2315 }
2316 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2317 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2318 break;
2319 }
2320
2321 /*
2322 * Act on state transition.
2323 */
2324 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2325 if (enmOldState != enmNewState)
2326 {
2327 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2328
2329 /* Clear MWait flags. */
2330 if ( enmOldState == EMSTATE_HALTED
2331 && (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2332 && ( enmNewState == EMSTATE_RAW
2333 || enmNewState == EMSTATE_HM
2334 || enmNewState == EMSTATE_REM
2335 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2336 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2337 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2338 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2339 {
2340 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2341 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2342 }
2343 }
2344 else
2345 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2346
2347 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2348 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2349
2350 /*
2351 * Act on the new state.
2352 */
2353 switch (enmNewState)
2354 {
2355 /*
2356 * Execute raw.
2357 */
2358 case EMSTATE_RAW:
2359#ifndef IEM_VERIFICATION_MODE /* remove later */
2360# ifdef VBOX_WITH_RAW_MODE
2361 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2362# else
2363 AssertLogRelMsgFailed(("%Rrc\n", rc));
2364 rc = VERR_EM_INTERNAL_ERROR;
2365# endif
2366 break;
2367#endif
2368
2369 /*
2370 * Execute hardware accelerated raw.
2371 */
2372 case EMSTATE_HM:
2373#ifndef IEM_VERIFICATION_MODE /* remove later */
2374 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2375 break;
2376#endif
2377
2378 /*
2379 * Execute recompiled.
2380 */
2381 case EMSTATE_REM:
2382#ifdef IEM_VERIFICATION_MODE
2383# if 1
2384 rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); fFFDone = false;
2385# else
2386 rc = VBOXSTRICTRC_TODO(REMR3EmulateInstruction(pVM, pVCpu)); fFFDone = false;
2387 if (rc == VINF_EM_RESCHEDULE)
2388 rc = VINF_SUCCESS;
2389# endif
2390#else
2391 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2392#endif
2393 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2394 break;
2395
2396 /*
2397 * Execute in the interpreter.
2398 */
2399 case EMSTATE_IEM:
2400#if 0 /* For testing purposes. */
2401 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu));
2402 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2403 rc = VINF_SUCCESS;
2404 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2405#endif
2406 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu));
2407 if (pVM->em.s.fIemExecutesAll)
2408 {
2409 Assert(rc != VINF_EM_RESCHEDULE_REM);
2410 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2411 Assert(rc != VINF_EM_RESCHEDULE_HM);
2412 }
2413 fFFDone = false;
2414 break;
2415
2416 /*
2417 * Application processor execution halted until SIPI.
2418 */
2419 case EMSTATE_WAIT_SIPI:
2420 /* no break */
2421 /*
2422 * hlt - execution halted until interrupt.
2423 */
2424 case EMSTATE_HALTED:
2425 {
2426 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2427 /* MWAIT has a special extension where it's woken up when
2428 an interrupt is pending even when IF=0. */
2429 if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2430 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2431 {
2432 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2433 if ( rc == VINF_SUCCESS
2434 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2435 {
2436 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2437 rc = VINF_EM_RESCHEDULE;
2438 }
2439 }
2440 else
2441 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2442
2443 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2444 break;
2445 }
2446
2447 /*
2448 * Suspended - return to VM.cpp.
2449 */
2450 case EMSTATE_SUSPENDED:
2451 TMR3NotifySuspend(pVM, pVCpu);
2452 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2453 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2454 return VINF_EM_SUSPEND;
2455
2456 /*
2457 * Debugging in the guest.
2458 */
2459 case EMSTATE_DEBUG_GUEST_RAW:
2460 case EMSTATE_DEBUG_GUEST_HM:
2461 case EMSTATE_DEBUG_GUEST_IEM:
2462 case EMSTATE_DEBUG_GUEST_REM:
2463 TMR3NotifySuspend(pVM, pVCpu);
2464 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2465 TMR3NotifyResume(pVM, pVCpu);
2466 Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2467 break;
2468
2469 /*
2470 * Debugging in the hypervisor.
2471 */
2472 case EMSTATE_DEBUG_HYPER:
2473 {
2474 TMR3NotifySuspend(pVM, pVCpu);
2475 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2476
2477 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2478 Log2(("EMR3ExecuteVM: enmr3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2479 if (rc != VINF_SUCCESS)
2480 {
2481 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2482 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2483 else
2484 {
2485 /* switch to guru meditation mode */
2486 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2487 VMMR3FatalDump(pVM, pVCpu, rc);
2488 }
2489 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2490 return rc;
2491 }
2492
2493 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2494 TMR3NotifyResume(pVM, pVCpu);
2495 break;
2496 }
2497
2498 /*
2499 * Guru meditation takes place in the debugger.
2500 */
2501 case EMSTATE_GURU_MEDITATION:
2502 {
2503 TMR3NotifySuspend(pVM, pVCpu);
2504 VMMR3FatalDump(pVM, pVCpu, rc);
2505 emR3Debug(pVM, pVCpu, rc);
2506 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2507 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2508 return rc;
2509 }
2510
2511 /*
2512 * The states we don't expect here.
2513 */
2514 case EMSTATE_NONE:
2515 case EMSTATE_TERMINATING:
2516 default:
2517 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2518 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2519 TMR3NotifySuspend(pVM, pVCpu);
2520 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2521 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2522 return VERR_EM_INTERNAL_ERROR;
2523 }
2524 } /* The Outer Main Loop */
2525 }
2526 else
2527 {
2528 /*
2529 * Fatal error.
2530 */
2531 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2532 TMR3NotifySuspend(pVM, pVCpu);
2533 VMMR3FatalDump(pVM, pVCpu, rc);
2534 emR3Debug(pVM, pVCpu, rc);
2535 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2536 /** @todo change the VM state! */
2537 return rc;
2538 }
2539
2540 /* (won't ever get here). */
2541 AssertFailed();
2542}
2543
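/*
 * Status-to-state transitions handled by the outer loop above (annotation only):
 *
 *   VINF_EM_RESCHEDULE_RAW/_HM/_REM   -> EMSTATE_RAW / EMSTATE_HM / EMSTATE_REM
 *   VINF_EM_RESCHEDULE, VINF_EM_RESET -> emR3Reschedule() picks the state
 *                                        (APs go to EMSTATE_WAIT_SIPI on reset)
 *   VINF_EM_HALT / VINF_EM_WAIT_SIPI  -> EMSTATE_HALTED / EMSTATE_WAIT_SIPI
 *   VINF_EM_SUSPEND / VINF_EM_NO_MEMORY -> EMSTATE_SUSPENDED
 *   VINF_EM_OFF / VINF_EM_TERMINATE   -> EMSTATE_TERMINATING (function returns)
 *   VINF_EM_DBG_*                     -> EMSTATE_DEBUG_GUEST_* / EMSTATE_DEBUG_HYPER
 *   unknown status                    -> EMSTATE_GURU_MEDITATION
 */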
2544/**
2545 * Notify EM of a state change (used by FTM): stops virtual time and suspends execution.
2546 *
2547 * @param pVM Pointer to the VM.
2548 */
2549VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
2550{
2551 PVMCPU pVCpu = VMMGetCpu(pVM);
2552
2553 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
2554 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
2555 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2556 return VINF_SUCCESS;
2557}
2558
2559/**
2560 * Notify EM of a state change (used by FTM): resumes virtual time and restores the previous state.
2561 *
2562 * @param pVM Pointer to the VM.
2563 */
2564VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
2565{
2566 PVMCPU pVCpu = VMMGetCpu(pVM);
2567 EMSTATE enmCurState = pVCpu->em.s.enmState;
2568
2569 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
2570 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2571 pVCpu->em.s.enmPrevState = enmCurState;
2572 return VINF_SUCCESS;
2573}
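/* Illustrative pairing sketch (annotation only): FTM is expected to bracket
   its synchronization work with these notifications so virtual time and the
   previous EM state are restored afterwards: */
#if 0
    EMR3NotifySuspend(pVM);   /* stop virtual time, park EM in EMSTATE_SUSPENDED */
    /* ... fault tolerance sync work ... */
    EMR3NotifyResume(pVM);    /* resume virtual time, restore the previous state */
#endif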