VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@ 107227

Last change on this file since 107227 was 107227, checked in by vboxsync, 6 weeks ago

VMM: Cleaning up ARMv8 / x86 split. jiraref:VBP-1470

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 115.7 KB
1/* $Id: EM.cpp 107227 2024-12-04 15:20:14Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28/** @page pg_em EM - The Execution Monitor / Manager
29 *
30 * The Execution Monitor/Manager is responsible for running the VM, scheduling
31 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
32 * Interpreted), and keeping the CPU states in sync. The function
33 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
34 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
35 * emR3RmExecute).
36 *
37 * The interpreted execution is only used to avoid switching between
38 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
39 * The interpretation is thus implemented as part of EM.
40 *
41 * @see grp_em
42 */
43
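/*
 * Rough call-flow sketch (an illustrative summary of the description above, not
 * additional API surface): each EMT ends up in EMR3ExecuteVM(), which picks an
 * EMSTATE via emR3Reschedule() and then spins in the matching inner loop
 * (emR3HmExecute, emR3NemExecute or emR3RecompilerExecute) until a status code
 * forces a reschedule, a debug event, or an exit from the VM loop.
 */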
44
45/*********************************************************************************************************************************
46* Header Files *
47*********************************************************************************************************************************/
48#define LOG_GROUP LOG_GROUP_EM
49#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET & interrupt injection */
50#include <VBox/vmm/em.h>
51#include <VBox/vmm/vmm.h>
52#include <VBox/vmm/selm.h>
53#include <VBox/vmm/trpm.h>
54#include <VBox/vmm/iem.h>
55#include <VBox/vmm/nem.h>
56#include <VBox/vmm/iom.h>
57#include <VBox/vmm/dbgf.h>
58#include <VBox/vmm/pgm.h>
59#include <VBox/vmm/pdmapic.h>
60#include <VBox/vmm/tm.h>
61#include <VBox/vmm/mm.h>
62#include <VBox/vmm/ssm.h>
63#include <VBox/vmm/pdmapi.h>
64#include <VBox/vmm/pdmcritsect.h>
65#include <VBox/vmm/pdmqueue.h>
66#include <VBox/vmm/hm.h>
67#include "EMInternal.h"
68#include <VBox/vmm/vm.h>
69#include <VBox/vmm/uvm.h>
70#include <VBox/vmm/cpumdis.h>
71#include <VBox/dis.h>
72#include <VBox/err.h>
73#include "VMMTracing.h"
74
75#include <iprt/asm.h>
76#include <iprt/string.h>
77#include <iprt/stream.h>
78#include <iprt/thread.h>
79
80#include "EMInline.h"
81
82
83/*********************************************************************************************************************************
84* Internal Functions *
85*********************************************************************************************************************************/
86static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
87static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
88#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
89static const char *emR3GetStateName(EMSTATE enmState);
90#endif
91static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
92
93
94/**
95 * Initializes the EM.
96 *
97 * @returns VBox status code.
98 * @param pVM The cross context VM structure.
99 */
100VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
101{
102 LogFlow(("EMR3Init\n"));
103 /*
104 * Assert alignment and sizes.
105 */
106 AssertCompileMemberAlignment(VM, em.s, 32);
107 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
108 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
109 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
110
111 /*
112 * Init the structure.
113 */
114 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
115 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
116
117 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll,
118#if (defined(VBOX_VMM_TARGET_X86) && !defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)) \
119 || (defined(VBOX_VMM_TARGET_ARMV8) && !defined(RT_ARCH_ARM64)) /** @todo not main exec engine = iem/recomp would be better... */
120 true
121#else
122 false
123#endif
124 );
125 AssertLogRelRCReturn(rc, rc);
126
127 bool fEnabled;
128 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
129 AssertLogRelRCReturn(rc, rc);
130 pVM->em.s.fGuruOnTripleFault = !fEnabled;
131 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
132 {
133 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
134 pVM->em.s.fGuruOnTripleFault = true;
135 }
136
137 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
138
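/*
 * Illustrative note (assumes the usual VBoxInternal/ extradata-to-CFGM mapping;
 * "MyVM" is a placeholder VM name): the /EM/ keys queried above can typically be
 * overridden from the host shell along the lines of:
 *     VBoxManage setextradata "MyVM" "VBoxInternal/EM/TripleFaultReset" 1
 */
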
139 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
140 * Whether to try correlate exit history in any context, detect hot spots and
141 * try optimize these using IEM if there are other exits close by. This
142 * overrides the context specific settings. */
143 bool fExitOptimizationEnabled = true;
144 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
145 AssertLogRelRCReturn(rc, rc);
146
147 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
148 * Whether to optimize exits in ring-0. Setting this to false will also disable
149 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
150 * capabilities of the host kernel, this optimization may be unavailable. */
151 bool fExitOptimizationEnabledR0 = true;
152 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
153 AssertLogRelRCReturn(rc, rc);
154 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
155
156 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
157 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
158 * hooks are in effect). */
159 /** @todo change the default to true here */
160 bool fExitOptimizationEnabledR0PreemptDisabled = true;
161 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
162 AssertLogRelRCReturn(rc, rc);
163 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
164
165 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
166 * Maximum number of instructions to let EMHistoryExec execute in one go. */
167 uint16_t cHistoryExecMaxInstructions = 8192;
168 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
169 AssertLogRelRCReturn(rc, rc);
170 if (cHistoryExecMaxInstructions < 16)
171 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
172
173 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
174 * Maximum number of instructions between exits during probing. */
175 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
176#ifdef RT_OS_WINDOWS
177 if (VM_IS_NEM_ENABLED(pVM))
178 cHistoryProbeMaxInstructionsWithoutExit = 32;
179#endif
180 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
181 cHistoryProbeMaxInstructionsWithoutExit);
182 AssertLogRelRCReturn(rc, rc);
183 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
184 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
185 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 2");
186
187 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
188 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
189 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
190 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
191 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
192 cHistoryProbeMinInstructions);
193 AssertLogRelRCReturn(rc, rc);
194
195 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
196 {
197 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
198 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
199 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
200 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
201 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
202 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
203 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
204 }
205
206#ifdef VBOX_WITH_IEM_RECOMPILER
207 /** @cfgm{/EM/IemRecompiled, bool, true}
208 * Whether IEM bulk execution is recompiled or interpreted. */
209 rc = CFGMR3QueryBoolDef(pCfgEM, "IemRecompiled", &pVM->em.s.fIemRecompiled, true);
210 AssertLogRelRCReturn(rc, rc);
211#endif
212
213 /*
214 * Saved state.
215 */
216 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
217 NULL, NULL, NULL,
218 NULL, emR3Save, NULL,
219 NULL, emR3Load, NULL);
220 if (RT_FAILURE(rc))
221 return rc;
222
223 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
224 {
225 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
226
227 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
228 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
229 pVCpu->em.s.msTimeSliceStart = 0; /* paranoia */
230 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
231
232# define EM_REG_COUNTER(a, b, c) \
233 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
234 AssertRC(rc);
235
236# define EM_REG_COUNTER_USED(a, b, c) \
237 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
238 AssertRC(rc);
239
240# define EM_REG_PROFILE(a, b, c) \
241 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
242 AssertRC(rc);
243
244# define EM_REG_PROFILE_ADV(a, b, c) \
245 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
246 AssertRC(rc);
247
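/*
 * For reference, a sketch of what one EM_REG_COUNTER invocation below expands to
 * (derived from the macro above; counter, strings and idCpu are taken from this file):
 *     rc = STAMR3RegisterF(pVM, &pVCpu->em.s.StatHMExecuteCalled, STAMTYPE_COUNTER,
 *                          STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
 *                          "Number of times emR3HmExecute is called.",
 *                          "/PROF/CPU%u/EM/HMExecuteCalled", idCpu);
 *     AssertRC(rc);
 */
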
248 /*
249 * Statistics.
250 */
251#ifdef VBOX_WITH_STATISTICS
252 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
253 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
254
255 /* these should be considered for release statistics. */
256 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
257 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
258 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
259#endif
260 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
261 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
262#ifdef VBOX_WITH_STATISTICS
263 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
264 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
265 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
266#endif
267 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
268 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
269#ifdef VBOX_WITH_STATISTICS
270 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
271#endif
272
273 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
274 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
275 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
276 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RecompilerExecute (excluding FFs).");
277
278 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
279
280 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
281 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
282 AssertRC(rc);
283
284 /* History record statistics */
285 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
286 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
287 AssertRC(rc);
288
289 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
290 {
291 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
292 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
293 AssertRC(rc);
294 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
295 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
296 AssertRC(rc);
297 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
298 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacements", idCpu, iStep);
299 AssertRC(rc);
300 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
301 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
302 AssertRC(rc);
303 }
304
305 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
306 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
307 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
308 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
309 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
310 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
311 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
312 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
313 }
314
315 emR3InitDbg(pVM);
316 return VINF_SUCCESS;
317}
318
319
320/**
321 * Called when a VM initialization stage is completed.
322 *
323 * @returns VBox status code.
324 * @param pVM The cross context VM structure.
325 * @param enmWhat The initialization state that was completed.
326 */
327VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
328{
329 if (enmWhat == VMINITCOMPLETED_RING0)
330 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
331 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
332 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
333 return VINF_SUCCESS;
334}
335
336
337/**
338 * Applies relocations to data and code managed by this
339 * component. This function will be called at init and
340 * whenever the VMM needs to relocate itself inside the GC.
341 *
342 * @param pVM The cross context VM structure.
343 */
344VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
345{
346 LogFlow(("EMR3Relocate\n"));
347 RT_NOREF(pVM);
348}
349
350
351/**
352 * Reset the EM state for a CPU.
353 *
354 * Called by EMR3Reset and hot plugging.
355 *
356 * @param pVCpu The cross context virtual CPU structure.
357 */
358VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
359{
360 /* Reset scheduling state. */
361 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
362
363 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
364 out of the HALTED state here so that enmPrevState doesn't end up as
365 HALTED when EMR3Execute returns. */
366 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
367 {
368 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
369 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
370 }
371}
372
373
374/**
375 * Reset notification.
376 *
377 * @param pVM The cross context VM structure.
378 */
379VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
380{
381 Log(("EMR3Reset: \n"));
382 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
383 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
384}
385
386
387/**
388 * Terminates the EM.
389 *
390 * Termination means cleaning up and freeing all resources,
391 * the VM itself is at this point powered off or suspended.
392 *
393 * @returns VBox status code.
394 * @param pVM The cross context VM structure.
395 */
396VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
397{
398 RT_NOREF(pVM);
399 return VINF_SUCCESS;
400}
401
402
403/**
404 * Execute state save operation.
405 *
406 * @returns VBox status code.
407 * @param pVM The cross context VM structure.
408 * @param pSSM SSM operation handle.
409 */
410static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
411{
412 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
413 {
414 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
415
416 SSMR3PutBool(pSSM, false /*fForceRAW*/);
417
418 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
419 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
420 SSMR3PutU32(pSSM,
421 pVCpu->em.s.enmPrevState == EMSTATE_NONE
422 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED
423 || pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
424 ? pVCpu->em.s.enmPrevState : EMSTATE_NONE);
425
426 /* Save mwait state. */
427 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
428 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
429 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
430 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
431 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
432 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
433 AssertRCReturn(rc, rc);
434 }
435 return VINF_SUCCESS;
436}
437
438
439/**
440 * Execute state load operation.
441 *
442 * @returns VBox status code.
443 * @param pVM The cross context VM structure.
444 * @param pSSM SSM operation handle.
445 * @param uVersion Data layout version.
446 * @param uPass The data pass.
447 */
448static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
449{
450 /*
451 * Validate version.
452 */
453 if ( uVersion > EM_SAVED_STATE_VERSION
454 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
455 {
456 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
457 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
458 }
459 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
460
461 /*
462 * Load the saved state.
463 */
464 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
465 {
466 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
467
468 bool fForceRAWIgnored;
469 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
470 AssertRCReturn(rc, rc);
471
472 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
473 {
474 /* We are only interested in two enmPrevState values for use when
475 EMR3ExecuteVM is called.
476 Since ~r157540, only these two and EMSTATE_NONE are saved. */
477 SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
478 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
479 if ( pVCpu->em.s.enmPrevState != EMSTATE_WAIT_SIPI
480 && pVCpu->em.s.enmPrevState != EMSTATE_HALTED)
481 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
482
483 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
484 }
485 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
486 {
487 /* Load mwait state. */
488 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
489 AssertRCReturn(rc, rc);
490 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
491 AssertRCReturn(rc, rc);
492 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
493 AssertRCReturn(rc, rc);
494 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
495 AssertRCReturn(rc, rc);
496 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
497 AssertRCReturn(rc, rc);
498 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
499 AssertRCReturn(rc, rc);
500 }
501 }
502 return VINF_SUCCESS;
503}
504
505
506/**
507 * Argument packet for emR3SetExecutionPolicy.
508 */
509struct EMR3SETEXECPOLICYARGS
510{
511 EMEXECPOLICY enmPolicy;
512 bool fEnforce;
513};
514
515
516/**
517 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
518 */
519static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
520{
521 /*
522 * Only the first CPU changes the variables.
523 */
524 if (pVCpu->idCpu == 0)
525 {
526 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
527 switch (pArgs->enmPolicy)
528 {
529 case EMEXECPOLICY_IEM_ALL:
530 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
531
532 /* For making '.alliem 1' useful during debugging, transition the
533 EMSTATE_DEBUG_GUEST_XXX to EMSTATE_DEBUG_GUEST_IEM. */
534 for (VMCPUID i = 0; i < pVM->cCpus; i++)
535 {
536 PVMCPU pVCpuX = pVM->apCpusR3[i];
537 switch (pVCpuX->em.s.enmState)
538 {
539 case EMSTATE_DEBUG_GUEST_RECOMPILER:
540 if (pVM->em.s.fIemRecompiled)
541 break;
542 RT_FALL_THROUGH();
543 case EMSTATE_DEBUG_GUEST_RAW:
544 case EMSTATE_DEBUG_GUEST_HM:
545 case EMSTATE_DEBUG_GUEST_NEM:
546 Log(("EM: idCpu=%u: %s -> EMSTATE_DEBUG_GUEST_IEM\n", i, emR3GetStateName(pVCpuX->em.s.enmState) ));
547 pVCpuX->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
548 break;
549 case EMSTATE_DEBUG_GUEST_IEM:
550 default:
551 break;
552 }
553 }
554 break;
555
556 case EMEXECPOLICY_IEM_RECOMPILED:
557 pVM->em.s.fIemRecompiled = pArgs->fEnforce;
558 break;
559
560 default:
561 AssertFailedReturn(VERR_INVALID_PARAMETER);
562 }
563 Log(("EM: Set execution policy: fIemExecutesAll=%RTbool fIemRecompiled=%RTbool\n",
564 pVM->em.s.fIemExecutesAll, pVM->em.s.fIemRecompiled));
565 }
566
567 /*
568 * Force rescheduling if in HM, NEM, IEM/interpreter or IEM/recompiler.
569 */
570 Assert(pVCpu->em.s.enmState != EMSTATE_RAW_OBSOLETE);
571 return pVCpu->em.s.enmState == EMSTATE_HM
572 || pVCpu->em.s.enmState == EMSTATE_NEM
573 || pVCpu->em.s.enmState == EMSTATE_IEM
574 || pVCpu->em.s.enmState == EMSTATE_RECOMPILER
575 ? VINF_EM_RESCHEDULE
576 : VINF_SUCCESS;
577}
578
579
580/**
581 * Changes an execution scheduling policy parameter.
582 *
583 * This is used to enable or disable raw-mode / hardware-virtualization
584 * execution of user and supervisor code.
585 *
586 * @returns VINF_SUCCESS on success.
587 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
588 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
589 *
590 * @param pUVM The user mode VM handle.
591 * @param enmPolicy The scheduling policy to change.
592 * @param fEnforce Whether to enforce the policy or not.
593 */
594VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
595{
596 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
597 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
598 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
599
600 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
601 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
602}
603
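/*
 * Minimal usage sketch (hypothetical caller; signature as declared above): force
 * all guest execution through IEM, which is effectively what the '.alliem 1'
 * debugger command does:
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *     AssertRC(rc);
 */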
604
605/**
606 * Queries an execution scheduling policy parameter.
607 *
608 * @returns VBox status code
609 * @param pUVM The user mode VM handle.
610 * @param enmPolicy The scheduling policy to query.
611 * @param pfEnforced Where to return the current value.
612 */
613VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
614{
615 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
616 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
617 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
618 PVM pVM = pUVM->pVM;
619 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
620
621 /* No need to bother EMTs with a query. */
622 switch (enmPolicy)
623 {
624 case EMEXECPOLICY_IEM_ALL:
625 *pfEnforced = pVM->em.s.fIemExecutesAll;
626 break;
627 case EMEXECPOLICY_IEM_RECOMPILED:
628 *pfEnforced = pVM->em.s.fIemRecompiled;
629 break;
630 default:
631 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
632 }
633
634 return VINF_SUCCESS;
635}
636
637
638/**
639 * Queries the main execution engine of the VM.
640 *
641 * @returns VBox status code
642 * @param pUVM The user mode VM handle.
643 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
644 */
645VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
646{
647 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
648 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
649
650 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
651 PVM pVM = pUVM->pVM;
652 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
653
654 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
655 return VINF_SUCCESS;
656}
657
658
659/**
660 * Raise a fatal error.
661 *
662 * Safely terminate the VM with full state report and stuff. This function
663 * will naturally never return.
664 *
665 * @param pVCpu The cross context virtual CPU structure.
666 * @param rc VBox status code.
667 */
668VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
669{
670 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
671 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
672}
673
674
675#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
676/**
677 * Gets the EM state name.
678 *
679 * @returns Pointer to the read-only state name.
680 * @param enmState The state.
681 */
682static const char *emR3GetStateName(EMSTATE enmState)
683{
684 switch (enmState)
685 {
686 case EMSTATE_NONE: return "EMSTATE_NONE";
687 case EMSTATE_RAW_OBSOLETE: return "EMSTATE_RAW_OBSOLETE";
688 case EMSTATE_HM: return "EMSTATE_HM";
689 case EMSTATE_IEM: return "EMSTATE_IEM";
690 case EMSTATE_RECOMPILER: return "EMSTATE_RECOMPILER";
691 case EMSTATE_HALTED: return "EMSTATE_HALTED";
692 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
693 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
694 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
695 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
696 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
697 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
698 case EMSTATE_DEBUG_GUEST_RECOMPILER: return "EMSTATE_DEBUG_GUEST_RECOMPILER";
699 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
700 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
701 case EMSTATE_IEM_THEN_REM_OBSOLETE: return "EMSTATE_IEM_THEN_REM_OBSOLETE";
702 case EMSTATE_NEM: return "EMSTATE_NEM";
703 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
704 default: return "Unknown!";
705 }
706}
707#endif /* LOG_ENABLED || VBOX_STRICT */
708
709#ifdef VBOX_VMM_TARGET_X86
710
711/**
712 * Handle pending ring-3 I/O port write.
713 *
714 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
715 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
716 *
717 * @returns Strict VBox status code.
718 * @param pVM The cross context VM structure.
719 * @param pVCpu The cross context virtual CPU structure.
720 */
721VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
722{
723 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
724
725 /* Get and clear the pending data. */
726 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
727 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
728 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
729 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
730 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
731
732 /* Assert sanity. */
733 switch (cbValue)
734 {
735 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
736 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
737 case 4: break;
738 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
739 }
740 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
741
742 /* Do the work.*/
743 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
744 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
745 if (IOM_SUCCESS(rcStrict))
746 {
747 pVCpu->cpum.GstCtx.rip += cbInstr;
748 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
749 }
750 return rcStrict;
751}
752
753
754/**
755 * Handle pending ring-3 I/O port read.
756 *
757 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
758 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
759 *
760 * @returns Strict VBox status code.
761 * @param pVM The cross context VM structure.
762 * @param pVCpu The cross context virtual CPU structure.
763 */
764VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
765{
766 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
767
768 /* Get and clear the pending data. */
769 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
770 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
771 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
772 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
773
774 /* Assert sanity. */
775 switch (cbValue)
776 {
777 case 1: break;
778 case 2: break;
779 case 4: break;
780 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
781 }
782 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
783 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
784
785 /* Do the work.*/
786 uint32_t uValue = 0;
787 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
788 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
789 if (IOM_SUCCESS(rcStrict))
790 {
791 if (cbValue == 4)
792 pVCpu->cpum.GstCtx.rax = uValue;
793 else if (cbValue == 2)
794 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
795 else
796 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
797 pVCpu->cpum.GstCtx.rip += cbInstr;
798 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
799 }
800 return rcStrict;
801}
802
803
804/**
805 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
806 * Worker for emR3ExecuteSplitLockInstruction}
807 */
808static DECLCALLBACK(VBOXSTRICTRC) emR3ExecuteSplitLockInstructionRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
809{
810 /* Only execute on the specified EMT. */
811 if (pVCpu == (PVMCPU)pvUser)
812 {
813 LogFunc(("\n"));
814 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
815 LogFunc(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
816 if (rcStrict == VINF_IEM_RAISED_XCPT)
817 rcStrict = VINF_SUCCESS;
818 return rcStrict;
819 }
820 RT_NOREF(pVM);
821 return VINF_SUCCESS;
822}
823
824
825/**
826 * Handle an instruction causing a split cacheline lock access in SMP VMs.
827 *
828 * Generally we only get here if the host has split-lock detection enabled and
829 * this caused an \#AC because of something the guest did. If we interpret the
830 * instruction as-is, we'll likely just repeat the split-lock access and
831 * possibly be killed, get a SIGBUS, or trigger a warning followed by extra MSR
832 * changes on context switching (costs a tiny bit). Assuming these \#ACs are
833 * rare to non-existent, we'll do a rendezvous of all EMTs and tell IEM to
834 * disregard the lock prefix when emulating the instruction.
835 *
836 * Yes, we could probably modify the MSR (or MSRs) controlling the detection
837 * feature when entering guest context, but the support for the feature isn't a
838 * 100% given and we'll need the debug-only supdrvOSMsrProberRead and
839 * supdrvOSMsrProberWrite functionality from SUPDrv.cpp to safely detect it.
840 * Thus the approach is to just deal with the spurious \#ACs first and maybe add
841 * proper detection to SUPDrv later if we find it necessary.
842 *
843 * @see @bugref{10052}
844 *
845 * @returns Strict VBox status code.
846 * @param pVM The cross context VM structure.
847 * @param pVCpu The cross context virtual CPU structure.
848 */
849VBOXSTRICTRC emR3ExecuteSplitLockInstruction(PVM pVM, PVMCPU pVCpu)
850{
851 LogFunc(("\n"));
852 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
853}
854
855#endif /* VBOX_VMM_TARGET_X86 */
856
857/**
858 * Debug loop.
859 *
860 * @returns VBox status code for EM.
861 * @param pVM The cross context VM structure.
862 * @param pVCpu The cross context virtual CPU structure.
863 * @param rc Current EM VBox status code.
864 */
865static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
866{
867 for (;;)
868 {
869 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
870 const VBOXSTRICTRC rcLast = rc;
871
872 /*
873 * Debug related RC.
874 */
875 switch (VBOXSTRICTRC_VAL(rc))
876 {
877 /*
878 * Single step an instruction.
879 */
880 case VINF_EM_DBG_STEP:
881 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
882 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
883 AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
884#ifdef VBOX_WITH_HWVIRT
885 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
886 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
887#endif
888 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
889 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
890 else
891 {
892#if defined(VBOX_VMM_TARGET_X86) /** @todo IEM/arm64 */
893 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
894 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
895 rc = VINF_EM_DBG_STEPPED;
896#else
897 AssertFailed();
898 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
899#endif
900 }
901
902#ifdef VBOX_VMM_TARGET_X86
903 if (rc != VINF_EM_EMULATE_SPLIT_LOCK)
904 { /* likely */ }
905 else
906 {
907 rc = emR3ExecuteSplitLockInstruction(pVM, pVCpu);
908 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
909 rc = VINF_EM_DBG_STEPPED;
910 }
911#endif
912 break;
913
914 /*
915 * Simple events: stepped, breakpoint, stop/assertion.
916 */
917 case VINF_EM_DBG_STEPPED:
918 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
919 break;
920
921 case VINF_EM_DBG_BREAKPOINT:
922 rc = DBGFR3BpHit(pVM, pVCpu);
923 break;
924
925 case VINF_EM_DBG_STOP:
926 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
927 break;
928
929 case VINF_EM_DBG_EVENT:
930 rc = DBGFR3EventHandlePending(pVM, pVCpu);
931 break;
932
933 case VINF_EM_DBG_HYPER_STEPPED:
934 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
935 break;
936
937 case VINF_EM_DBG_HYPER_BREAKPOINT:
938 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
939 break;
940
941 case VINF_EM_DBG_HYPER_ASSERTION:
942 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
943 RTLogFlush(NULL);
944 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
945 break;
946
947 /*
948 * Guru meditation.
949 */
950 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
951 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
952 break;
953 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
954 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
955 break;
956
957 default: /** @todo don't use default for guru, but make special errors code! */
958 {
959 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
960 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
961 break;
962 }
963 }
964
965 /*
966 * Process the result.
967 */
968 switch (VBOXSTRICTRC_VAL(rc))
969 {
970 /*
971 * Continue the debugging loop.
972 */
973 case VINF_EM_DBG_STEP:
974 case VINF_EM_DBG_STOP:
975 case VINF_EM_DBG_EVENT:
976 case VINF_EM_DBG_STEPPED:
977 case VINF_EM_DBG_BREAKPOINT:
978 case VINF_EM_DBG_HYPER_STEPPED:
979 case VINF_EM_DBG_HYPER_BREAKPOINT:
980 case VINF_EM_DBG_HYPER_ASSERTION:
981 break;
982
983 /*
984 * Resuming execution (in some form) has to be done here if we got
985 * a hypervisor debug event.
986 */
987 case VINF_SUCCESS:
988 case VINF_EM_RESUME:
989 case VINF_EM_SUSPEND:
990 case VINF_EM_RESCHEDULE:
991 case VINF_EM_RESCHEDULE_REM:
992 case VINF_EM_HALT:
993 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
994 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
995 if (rc == VINF_SUCCESS)
996 rc = VINF_EM_RESCHEDULE;
997 return rc;
998
999 /*
1000 * The debugger isn't attached.
1001 * We'll simply turn the thing off since that's the easiest thing to do.
1002 */
1003 case VERR_DBGF_NOT_ATTACHED:
1004 switch (VBOXSTRICTRC_VAL(rcLast))
1005 {
1006 case VINF_EM_DBG_HYPER_STEPPED:
1007 case VINF_EM_DBG_HYPER_BREAKPOINT:
1008 case VINF_EM_DBG_HYPER_ASSERTION:
1009 case VERR_TRPM_PANIC:
1010 case VERR_TRPM_DONT_PANIC:
1011 case VERR_VMM_RING0_ASSERTION:
1012 case VERR_VMM_HYPER_CR3_MISMATCH:
1013 case VERR_VMM_RING3_CALL_DISABLED:
1014 return rcLast;
1015 }
1016 return VINF_EM_OFF;
1017
1018 /*
1019 * Status codes terminating the VM in one or another sense.
1020 */
1021 case VINF_EM_TERMINATE:
1022 case VINF_EM_OFF:
1023 case VINF_EM_RESET:
1024 case VINF_EM_NO_MEMORY:
1025 case VINF_EM_RAW_STALE_SELECTOR:
1026 case VINF_EM_RAW_IRET_TRAP:
1027 case VERR_TRPM_PANIC:
1028 case VERR_TRPM_DONT_PANIC:
1029 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
1030 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
1031 case VERR_VMM_RING0_ASSERTION:
1032 case VERR_VMM_HYPER_CR3_MISMATCH:
1033 case VERR_VMM_RING3_CALL_DISABLED:
1034 case VERR_INTERNAL_ERROR:
1035 case VERR_INTERNAL_ERROR_2:
1036 case VERR_INTERNAL_ERROR_3:
1037 case VERR_INTERNAL_ERROR_4:
1038 case VERR_INTERNAL_ERROR_5:
1039 case VERR_IPE_UNEXPECTED_STATUS:
1040 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1041 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1042 return rc;
1043
1044 /*
1045 * The rest is unexpected, and will keep us here.
1046 */
1047 default:
1048 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1049 break;
1050 }
1051 } /* debug for ever */
1052}
1053
1054
1055/**
1056 * Executes recompiled code.
1057 *
1058 * This function contains the recompiler version of the inner
1059 * execution loop (the outer loop being in EMR3ExecuteVM()).
1060 *
1061 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1062 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1063 *
1064 * @param pVM The cross context VM structure.
1065 * @param pVCpu The cross context virtual CPU structure.
1066 * @param fWasHalted Set if we're coming out of a CPU HALT state.
1067 * @param pfFFDone Where to store an indicator telling whether or not
1068 * FFs were done before returning.
1069 *
1070 */
1071static VBOXSTRICTRC emR3RecompilerExecute(PVM pVM, PVMCPU pVCpu, bool fWasHalted, bool *pfFFDone)
1072{
1073 STAM_REL_PROFILE_START(&pVCpu->em.s.StatREMTotal, a);
1074#ifdef VBOX_VMM_TARGET_ARMV8
1075 LogFlow(("emR3RecompilerExecute/%u: (pc=%RGv)\n", pVCpu->idCpu, (RTGCPTR)pVCpu->cpum.GstCtx.Pc.u64));
1076#elif defined(VBOX_VMM_TARGET_X86)
1077 LogFlow(("emR3RecompilerExecute/%u: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));
1078#else
1079# error "port me"
1080#endif
1081
1082 /*
1083 * Loop till we get a forced action which returns anything but VINF_SUCCESS.
1084 */
1085 *pfFFDone = false;
1086 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1087 for (;;)
1088 {
1089#ifdef LOG_ENABLED
1090# if defined(VBOX_VMM_TARGET_ARMV8)
1091 Log3(("EM: pc=%08RX64\n", CPUMGetGuestFlatPC(pVCpu)));
1092# elif defined(VBOX_VMM_TARGET_X86)
1093 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1094 Log(("EMR%d: %04X:%08RX64 RSP=%08RX64 IF=%d CR0=%x eflags=%x\n", CPUMGetGuestCPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel,
1095 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF,
1096 (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1097 else
1098 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1099# else
1100# error "port me"
1101# endif
1102#endif
1103
1104 /*
1105 * Execute.
1106 */
1107 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1108 {
1109 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1110#ifdef VBOX_WITH_IEM_RECOMPILER
1111 if (pVM->em.s.fIemRecompiled)
1112 rcStrict = IEMExecRecompiler(pVM, pVCpu, fWasHalted);
1113 else
1114#endif
1115 rcStrict = IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/);
1116 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1117 }
1118 else
1119 {
1120 /* Give up this time slice; virtual time continues */
1121 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1122 RTThreadSleep(5);
1123 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1124 rcStrict = VINF_SUCCESS;
1125 }
1126
1127 /*
1128 * Deal with high priority post execution FFs before doing anything
1129 * else. Sync back the state and leave the lock to be on the safe side.
1130 */
1131 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1132 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1133 rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, rcStrict);
1134
1135 /*
1136 * Process the returned status code.
1137 */
1138 if (rcStrict != VINF_SUCCESS)
1139 {
1140#ifndef VBOX_VMM_TARGET_ARMV8
1141 if (rcStrict == VINF_EM_EMULATE_SPLIT_LOCK)
1142 rcStrict = emR3ExecuteSplitLockInstruction(pVM, pVCpu);
1143#endif
1144 if (rcStrict != VINF_SUCCESS)
1145 {
1146#if 0
1147 if (RT_LIKELY(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST))
1148 break;
1149 /* Fatal error: */
1150#endif
1151 break;
1152 }
1153 }
1154
1155
1156 /*
1157 * Check and execute forced actions.
1158 *
1159 * Sync back the VM state and leave the lock before calling any of
1160 * these, you never know what's going to happen here.
1161 */
1162#ifdef VBOX_HIGH_RES_TIMERS_HACK
1163 TMTimerPollVoid(pVM, pVCpu);
1164#endif
1165 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1166 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1167 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1168 {
1169 rcStrict = emR3ForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
1170 VBOXVMM_EM_FF_ALL_RET(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1171 if ( rcStrict != VINF_SUCCESS
1172 && rcStrict != VINF_EM_RESCHEDULE_REM)
1173 {
1174 *pfFFDone = true;
1175 break;
1176 }
1177 }
1178
1179 /*
1180 * Check if we can switch back to the main execution engine now.
1181 */
1182#ifdef VBOX_WITH_HWVIRT
1183 if (VM_IS_HM_ENABLED(pVM))
1184 {
1185 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1186 {
1187 *pfFFDone = true;
1188 rcStrict = VINF_EM_RESCHEDULE_EXEC_ENGINE;
1189 break;
1190 }
1191 }
1192 else
1193#endif
1194 if (VM_IS_NEM_ENABLED(pVM))
1195 {
1196 if (NEMR3CanExecuteGuest(pVM, pVCpu))
1197 {
1198 *pfFFDone = true;
1199 rcStrict = VINF_EM_RESCHEDULE_EXEC_ENGINE;
1200 break;
1201 }
1202 }
1203
1204#ifdef VBOX_WITH_IEM_RECOMPILER
1205 fWasHalted = false;
1206#else
1207 RT_NOREF(fWasHalted);
1208#endif
1209 } /* The Inner Loop, recompiled execution mode version. */
1210
1211 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatREMTotal, a);
1212 return rcStrict;
1213}
1214
1215
1216/**
1217 * Decides whether to execute HM, NEM, IEM/interpreter or IEM/recompiler.
1218 *
1219 * @returns new EM state
1220 * @param pVM The cross context VM structure.
1221 * @param pVCpu The cross context virtual CPU structure.
1222 */
1223EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1224{
1225 /*
1226 * We stay in the wait for SIPI state unless explicitly told otherwise.
1227 */
1228 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1229 return EMSTATE_WAIT_SIPI;
1230
1231 /*
1232 * Can we use the default engine. IEM is the fallback.
1233 */
1234 if (!pVM->em.s.fIemExecutesAll)
1235 {
1236 switch (pVM->bMainExecutionEngine)
1237 {
1238#ifdef VBOX_WITH_HWVIRT
1239 case VM_EXEC_ENGINE_HW_VIRT:
1240 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1241 return EMSTATE_HM;
1242 break;
1243#endif
1244#ifdef VBOX_WITH_NATIVE_NEM
1245 case VM_EXEC_ENGINE_NATIVE_API:
1246 if (NEMR3CanExecuteGuest(pVM, pVCpu))
1247 return EMSTATE_NEM;
1248 break;
1249#endif
1250 case VM_EXEC_ENGINE_IEM:
1251 break;
1252 default:
1253 AssertMsgFailed(("bMainExecutionEngine=%d\n", pVM->bMainExecutionEngine));
1254 break;
1255 }
1256 }
1257#ifdef VBOX_WITH_IEM_RECOMPILER
1258 return pVM->em.s.fIemRecompiled ? EMSTATE_RECOMPILER : EMSTATE_IEM;
1259#else
1260 return EMSTATE_IEM;
1261#endif
1262}
1263
1264
1265/**
1266 * Executes all high priority post execution force actions.
1267 *
1268 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1269 * fatal error status code.
1270 *
1271 * @param pVM The cross context VM structure.
1272 * @param pVCpu The cross context virtual CPU structure.
1273 * @param rc The current strict VBox status code rc.
1274 */
1275VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1276{
1277 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1278
1279 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1280 PDMCritSectBothFF(pVM, pVCpu);
1281
1282#ifdef VBOX_VMM_TARGET_X86
1283 /* Update CR3 (Nested Paging case for HM). */
1284 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1285 {
1286 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1287 int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1288 if (RT_FAILURE(rc2))
1289 return rc2;
1290 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1291 }
1292#endif
1293
1294 /* IEM has pending work (typically memory write after INS instruction). */
1295 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1296 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1297
1298 /* IOM has pending work (committing an I/O or MMIO write). */
1299 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1300 {
1301 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1302 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1303 { /* half likely, or at least it's a line shorter. */ }
1304 else if (rc == VINF_SUCCESS)
1305 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1306 else
1307 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1308 }
1309
1310 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1311 {
1312 if ( rc > VINF_EM_NO_MEMORY
1313 && rc <= VINF_EM_LAST)
1314 rc = VINF_EM_NO_MEMORY;
1315 }
1316
1317 return rc;
1318}
1319
1320#ifdef VBOX_VMM_TARGET_X86
1321
1322/**
1323 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1324 *
1325 * @returns VBox status code.
1326 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1327 * @param pVCpu The cross context virtual CPU structure.
1328 */
1329static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1330{
1331# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1332 /* Handle the "external interrupt" VM-exit intercept. */
1333 if ( CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
1334 && !CPUMIsGuestVmxExitCtlsSet(&pVCpu->cpum.GstCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
1335 {
1336 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1337 AssertMsg( rcStrict != VINF_VMX_VMEXIT /* VM-exit should have been converted to VINF_SUCCESS. */
1338 && rcStrict != VINF_NO_CHANGE
1339 && rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1340 return VBOXSTRICTRC_VAL(rcStrict);
1341 }
1342# else
1343 RT_NOREF(pVCpu);
1344# endif
1345 return VINF_NO_CHANGE;
1346}
1347
1348
1349/**
1350 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1351 *
1352 * @returns VBox status code.
1353 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1354 * @param pVCpu The cross context virtual CPU structure.
1355 */
1356static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1357{
1358# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1359 /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
1360 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1361 {
1362 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1363 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1364 if (RT_SUCCESS(rcStrict))
1365 {
1366 AssertMsg( rcStrict != VINF_SVM_VMEXIT
1367 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1368 return VBOXSTRICTRC_VAL(rcStrict);
1369 }
1370
1371 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1372 return VINF_EM_TRIPLE_FAULT;
1373 }
1374# else
1375 NOREF(pVCpu);
1376# endif
1377 return VINF_NO_CHANGE;
1378}
1379
1380
1381/**
1382 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1383 *
1384 * @returns VBox status code.
1385 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1386 * @param pVCpu The cross context virtual CPU structure.
1387 */
1388static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1389{
1390# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1391 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1392 {
1393 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1394 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1395 if (RT_SUCCESS(rcStrict))
1396 {
1397 Assert(rcStrict != VINF_SVM_VMEXIT);
1398 return VBOXSTRICTRC_VAL(rcStrict);
1399 }
1400 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1401 return VINF_EM_TRIPLE_FAULT;
1402 }
1403# else
1404 NOREF(pVCpu);
1405# endif
1406 return VINF_NO_CHANGE;
1407}
1408
1409#endif /* VBOX_VMM_TARGET_X86 */
1410
1411/**
1412 * Executes all pending forced actions.
1413 *
1414 * Forced actions can cause execution delays and execution
1415 * rescheduling. The first we deal with using action priority, so
1416 * that for instance pending timers aren't scheduled and run until
1417 * right before execution. The rescheduling we deal with using
1418 * return codes. The same goes for VM termination, only in that case
1419 * we exit everything.
1420 *
1421 * @returns VBox status code of equal or greater importance/severity than rc.
1422 * The most important ones are: VINF_EM_RESCHEDULE,
1423 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1424 *
1425 * @param pVM The cross context VM structure.
1426 * @param pVCpu The cross context virtual CPU structure.
1427 * @param rc The current rc.
1428 *
1429 */
1430int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1431{
1432 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1433#ifdef VBOX_STRICT
1434 int rcIrq = VINF_SUCCESS;
1435#endif
1436 int rc2;
1437#define UPDATE_RC() \
1438 do { \
1439 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1440 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1441 break; \
1442 if (!rc || rc2 < rc) \
1443 rc = rc2; \
1444 } while (0)
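/* Worked example of the UPDATE_RC() merge above (illustrative): starting with
   rc = VINF_SUCCESS and rc2 = VINF_EM_RESCHEDULE, the "!rc" test adopts rc2, so
   rc becomes VINF_EM_RESCHEDULE; a later rc2 = VINF_EM_SUSPEND, being a lower
   (more important) VINF_EM_* value, then replaces it via "rc2 < rc". An rc that
   is already an error (negative) is never overwritten. */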
1445 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1446
1447 /*
1448 * Post execution chunk first.
1449 */
1450 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1451 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1452 {
1453 /*
1454 * EMT Rendezvous (must be serviced before termination).
1455 */
1456 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1457 {
1458 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1459 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1460 UPDATE_RC();
1461 /** @todo HACK ALERT! The following test is to make sure EM+TM
1462 * thinks the VM is stopped/reset before the next VM state change
1463 * is made. We need a better solution for this, or at least make it
1464 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1465 * VINF_EM_SUSPEND). */
1466 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1467 {
1468 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1469 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1470 return rc;
1471 }
1472 }
1473
1474 /*
1475 * State change request (cleared by vmR3SetStateLocked).
1476 */
1477 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1478 {
1479 VMSTATE enmState = VMR3GetState(pVM);
1480 switch (enmState)
1481 {
1482 case VMSTATE_FATAL_ERROR:
1483 case VMSTATE_FATAL_ERROR_LS:
1484 case VMSTATE_GURU_MEDITATION:
1485 case VMSTATE_GURU_MEDITATION_LS:
1486 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1487 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1488 return VINF_EM_SUSPEND;
1489
1490 case VMSTATE_DESTROYING:
1491 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1492 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1493 return VINF_EM_TERMINATE;
1494
1495 default:
1496 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1497 }
1498 }
1499
1500 /*
1501 * Debugger Facility polling.
1502 */
1503 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1504 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1505 {
1506 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1507 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1508 /** @todo why that VINF_EM_DBG_EVENT here? Duplicate info, should be handled
1509 * somewhere before we get here, I would think. */
1510 if (rc == VINF_EM_DBG_EVENT) /* HACK! We should've handled pending debug event. */
1511 rc = rc2;
1512 else
1513 UPDATE_RC();
1514 }
1515
1516 /*
1517 * Postponed reset request.
1518 */
1519 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1520 {
1521 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1522 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1523 UPDATE_RC();
1524 }
1525
1526#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
1527 /*
1528 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1529 */
1530 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1531 {
1532 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1533 UPDATE_RC();
1534 if (rc == VINF_EM_NO_MEMORY)
1535 return rc;
1536 }
1537#endif
1538
1539 /* check that we got them all */
1540 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1541 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1542 }
1543
1544 /*
1545 * Normal priority then.
1546 * (Executed in no particular order.)
1547 */
1548 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1549 {
1550 /*
1551 * PDM Queues are pending.
1552 */
1553 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1554 PDMR3QueueFlushAll(pVM);
1555
1556 /*
1557 * PDM DMA transfers are pending.
1558 */
1559 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1560 PDMR3DmaRun(pVM);
1561
1562 /*
1563 * EMT Rendezvous (make sure they are handled before the requests).
1564 */
1565 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1566 {
1567 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1568 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1569 UPDATE_RC();
1570 /** @todo HACK ALERT! The following test is to make sure EM+TM
1571 * thinks the VM is stopped/reset before the next VM state change
1572 * is made. We need a better solution for this, or at least make it
1573 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1574 * VINF_EM_SUSPEND). */
1575 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1576 {
1577 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1578 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1579 return rc;
1580 }
1581 }
1582
1583 /*
1584 * Requests from other threads.
1585 */
1586 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1587 {
1588 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1589 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1590 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1591 {
1592 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1593 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1594 return rc2;
1595 }
1596 UPDATE_RC();
1597 /** @todo HACK ALERT! The following test is to make sure EM+TM
1598 * thinks the VM is stopped/reset before the next VM state change
1599 * is made. We need a better solution for this, or at least make it
1600 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1601 * VINF_EM_SUSPEND). */
1602 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1603 {
1604 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1605 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1606 return rc;
1607 }
1608 }
1609
1610 /* check that we got them all */
1611 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS));
1612 }
1613
1614 /*
1615 * Normal priority then. (per-VCPU)
1616 * (Executed in no particular order.)
1617 */
1618 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1619 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1620 {
1621 /*
1622 * Requests from other threads.
1623 */
1624 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1625 {
1626 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1627 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1628 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1629 {
1630 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1631 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1632 return rc2;
1633 }
1634 UPDATE_RC();
1635 /** @todo HACK ALERT! The following test is to make sure EM+TM
1636 * thinks the VM is stopped/reset before the next VM state change
1637 * is made. We need a better solution for this, or at least make it
1638 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1639 * VINF_EM_SUSPEND). */
1640 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1641 {
1642 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1643 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1644 return rc;
1645 }
1646 }
1647
1648 /* check that we got them all */
1649 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1650 }
1651
1652 /*
1653 * High priority pre execution chunk last.
1654 * (Executed in ascending priority order.)
1655 */
1656 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1657 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1658 {
1659 /*
1660 * Timers before interrupts.
1661 */
1662 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1663 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1664 TMR3TimerQueuesDo(pVM);
1665
1666#ifdef VBOX_VMM_TARGET_X86
1667 /*
1668 * Pick up asynchronously posted interrupts into the APIC.
1669 */
1670 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1671 PDMApicUpdatePendingInterrupts(pVCpu);
1672
1673 /*
1674 * The instruction following an emulated STI should *always* be executed!
1675 *
1676 * Note! We intentionally don't clear CPUMCTX_INHIBIT_INT here if
1677 * the eip is the same as the inhibited instr address. Before we
1678 * are able to execute this instruction in raw mode (iret to
1679 * guest code) an external interrupt might force a world switch
1680 * again. Possibly allowing a guest interrupt to be dispatched
1681 * in the process. This could break the guest. Sounds very
1682 * unlikely, but such timing sensitive problems are not as rare as
1683 * you might think.
1684 *
1685 * Note! This used to be a force action flag. Can probably ditch this code.
1686 */
1687 /** @todo r=bird: the clearing case will *never* be taken here as
1688 * CPUMIsInInterruptShadow already makes sure the RIPs matches. */
1689 if ( CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1690 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1691 {
1692 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_INHIBIT_INT);
1693 if (CPUMGetGuestRIP(pVCpu) != pVCpu->cpum.GstCtx.uRipInhibitInt)
1694 {
1695 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
1696 Log(("Clearing CPUMCTX_INHIBIT_INT at %RGv - successor %RGv\n",
1697 (RTGCPTR)CPUMGetGuestRIP(pVCpu), (RTGCPTR)pVCpu->cpum.GstCtx.uRipInhibitInt));
1698 }
1699 else
1700 Log(("Leaving CPUMCTX_INHIBIT_INT set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1701 }
1702
1703 /** @todo SMIs. If we implement SMIs, this is where they will have to be
1704 * delivered. */
1705
1706# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1707 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
1708 | VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW))
1709 {
1710 /*
1711 * VMX Nested-guest APIC-write pending (can cause VM-exits).
1712 * Takes priority over even SMI and INIT signals.
1713 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
1714 */
1715 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
1716 {
1717 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
1718 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1719 UPDATE_RC();
1720 }
1721
1722 /*
1723 * APIC write emulation MAY have caused a VM-exit.
1724 * If it did cause a VM-exit, there's no point checking the other VMX non-root mode FFs here.
1725 */
1726 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
1727 {
1728 /*
1729 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
1730 * Takes priority over "Traps on the previous instruction".
1731 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
1732 */
1733 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
1734 {
1735 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
1736 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1737 UPDATE_RC();
1738 }
1739 /*
1740 * VMX Nested-guest preemption timer VM-exit.
1741 * Takes priority over NMI-window VM-exits.
1742 */
1743 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
1744 {
1745 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
1746 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1747 UPDATE_RC();
1748 }
1749 /*
1750 * VMX interrupt-window and NMI-window VM-exits.
1751 * Take priority over external interrupts and non-maskable interrupts (NMIs) respectively.
1752 * If we are in an interrupt shadow or if we are already in the process of delivering
1753 * an event then these VM-exits cannot occur.
1754 *
1755 * Interrupt shadows block NMI-window VM-exits.
1756 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
1757 *
1758 * See Intel spec. 25.2 "Other Causes Of VM Exits".
1759 * See Intel spec. 26.7.6 "NMI-Window Exiting".
1760 * See Intel spec. 6.7 "Nonmaskable Interrupt (NMI)".
1761 */
1762 else if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1763 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx)
1764 && !TRPMHasTrap(pVCpu))
1765 {
1766 /*
1767 * VMX NMI-window VM-exit.
1768 */
1769 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1770 && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1771 {
1772 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
1773 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1774 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
1775 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1776 && rc2 != VINF_VMX_VMEXIT
1777 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1778 UPDATE_RC();
1779 }
1780 /*
1781 * VMX interrupt-window VM-exit.
1782 * This is a bit messy with the way the code below is currently structured,
1783 * but checking VMCPU_FF_INTERRUPT_NMI here (combined with CPUMAreInterruptsInhibitedByNmi
1784 * already checked at this point) should allow a pending NMI to be delivered prior to
1785 * causing an interrupt-window VM-exit.
1786 */
1787 /** @todo Restructure this later to happen after injecting NMI/causing NMI-exit, see
1788 * code in VMX R0 event delivery. */
1789 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
1790 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1791 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1792 {
1793 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
1794 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1795 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
1796 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1797 && rc2 != VINF_VMX_VMEXIT
1798 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1799 UPDATE_RC();
1800 }
1801 }
1802 }
1803
1804 /*
1805 * Interrupt-window and NMI-window force flags might still be pending if we didn't actually cause
1806 * a VM-exit above. They will get cleared eventually when ANY nested-guest VM-exit occurs.
1807 * However, the force flags asserted below MUST have been cleared at this point.
1808 */
1809 Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER));
1810 }
1811# endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1812
1813 /*
1814 * Guest event injection.
1815 */
1816 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
1817 bool fWakeupPending = false;
1818 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW
1819 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_NESTED_GUEST
1820 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1821 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1822 && (!rc || rc >= VINF_EM_RESCHEDULE_EXEC_ENGINE)
1823 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx) /* Interrupt shadows block both NMIs and interrupts. */
1824 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
1825 {
1826 if (CPUMGetGuestGif(&pVCpu->cpum.GstCtx))
1827 {
1828 bool fInVmxNonRootMode;
1829 bool fInSvmHwvirtMode;
1830 if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx))
1831 {
1832 fInVmxNonRootMode = false;
1833 fInSvmHwvirtMode = false;
1834 }
1835 else
1836 {
1837 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1838 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1839 }
1840
1841 /*
1842 * NMIs (take priority over external interrupts).
1843 */
1844 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1845 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
1846 {
1847# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1848 if ( fInVmxNonRootMode
1849 && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
1850 {
1851 /* We MUST clear the NMI force-flag here, see @bugref{10318#c19}. */
1852 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1853 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
1854 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1855 UPDATE_RC();
1856 }
1857 else
1858# endif
1859# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1860 if ( fInSvmHwvirtMode
1861 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
1862 {
1863 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
1864 AssertMsg( rc2 != VINF_SVM_VMEXIT
1865 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1866 UPDATE_RC();
1867 }
1868 else
1869# endif
1870 {
1871 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_NMI);
1872 if (rc2 == VINF_SUCCESS)
1873 {
1874 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1875 fWakeupPending = true;
1876# if 0 /* HMR3IsActive is not reliable (esp. after restore), just return VINF_EM_RESCHEDULE. */
1877 if (pVM->em.s.fIemExecutesAll)
1878 rc2 = VINF_EM_RESCHEDULE;
1879 else
1880 {
1881 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
1882 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
1883 : VINF_EM_RESCHEDULE_REM;
1884 }
1885# else
1886 rc2 = VINF_EM_RESCHEDULE;
1887# endif
1888 }
1889 UPDATE_RC();
1890 }
1891 }
1892# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1893 /** @todo NSTSVM: Handle this for SVM here too later, not when an interrupt is
1894 * actually pending like we currently do. */
1895# endif
1896 /*
1897 * External interrupts.
1898 */
1899 else
1900 {
1901 /*
1902 * VMX: virtual interrupts take priority over physical interrupts.
1903 * SVM: physical interrupts take priority over virtual interrupts.
1904 */
1905 if ( fInVmxNonRootMode
1906 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1907 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1908 {
1909 /** @todo NSTVMX: virtual-interrupt delivery. */
1910 rc2 = VINF_SUCCESS;
1911 }
1912 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1913 && CPUMIsGuestPhysIntrEnabled(pVCpu))
1914 {
1915 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1916 if (fInVmxNonRootMode)
1917 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
1918 else if (fInSvmHwvirtMode)
1919 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
1920 else
1921 rc2 = VINF_NO_CHANGE;
1922
1923 if (rc2 == VINF_NO_CHANGE)
1924 {
1925 bool fInjected = false;
1926 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1927 /** @todo this really isn't nice, should properly handle this */
1928 /* Note! This can still cause a VM-exit (on Intel). */
1929 LogFlow(("Calling TRPMR3InjectEvent: %04x:%08RX64 efl=%#x\n",
1930 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
1931 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
1932 fWakeupPending = true;
1933 if ( pVM->em.s.fIemExecutesAll
1934 && ( rc2 == VINF_EM_RESCHEDULE_REM
1935 || rc2 == VINF_EM_RESCHEDULE_EXEC_ENGINE))
1936 rc2 = VINF_EM_RESCHEDULE;
1937# ifdef VBOX_STRICT
1938 if (fInjected)
1939 rcIrq = rc2;
1940# endif
1941 }
1942 UPDATE_RC();
1943 }
1944 else if ( fInSvmHwvirtMode
1945 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1946 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
1947 {
1948 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
1949 if (rc2 == VINF_NO_CHANGE)
1950 {
1951 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1952 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
1953 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
1954 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1955 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1956 rc2 = VINF_EM_RESCHEDULE;
1957# ifdef VBOX_STRICT
1958 rcIrq = rc2;
1959# endif
1960 }
1961 UPDATE_RC();
1962 }
1963 }
1964 } /* CPUMGetGuestGif */
1965 }
1966
1967#else /* VBOX_VMM_TARGET_ARMV8 */
1968 bool fWakeupPending = false;
1969 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VTIMER_ACTIVATED))
1970 {
1971 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VTIMER_ACTIVATED);
1972
1973 fWakeupPending = true;
1974 rc2 = VINF_EM_RESCHEDULE;
1975 UPDATE_RC();
1976 }
1977#endif /* VBOX_VMM_TARGET_ARMV8 */
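
/* Note: on the ARMv8 target the only guest event serviced at this point is the
   virtual-timer activation FF above, which simply forces a reschedule; interrupt
   delivery for that target is presumably handled outside this function (e.g. by
   the NEM backend). */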
1978
1979#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
1980 /*
1981 * Allocate handy pages.
1982 */
1983 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1984 {
1985 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1986 UPDATE_RC();
1987 }
1988#endif
1989
1990 /*
1991 * Debugger Facility request.
1992 */
1993 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1994 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1995 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
1996 {
1997 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1998 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1999 UPDATE_RC();
2000 }
2001
2002 /*
2003 * EMT Rendezvous (must be serviced before termination).
2004 */
2005 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2006 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2007 {
2008 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2009 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2010 UPDATE_RC();
2011 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2012 * stopped/reset before the next VM state change is made. We need a better
2013 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2014 * && rc <= VINF_EM_SUSPEND). */
2015 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2016 {
2017 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2018 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2019 return rc;
2020 }
2021 }
2022
2023 /*
2024 * State change request (cleared by vmR3SetStateLocked).
2025 */
2026 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2027 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2028 {
2029 VMSTATE enmState = VMR3GetState(pVM);
2030 switch (enmState)
2031 {
2032 case VMSTATE_FATAL_ERROR:
2033 case VMSTATE_FATAL_ERROR_LS:
2034 case VMSTATE_GURU_MEDITATION:
2035 case VMSTATE_GURU_MEDITATION_LS:
2036 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2037 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2038 return VINF_EM_SUSPEND;
2039
2040 case VMSTATE_DESTROYING:
2041 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2042 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2043 return VINF_EM_TERMINATE;
2044
2045 default:
2046 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2047 }
2048 }
2049
2050 /*
2051 * Out of memory? Since most of our fellow high priority actions may cause us
2052 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2053 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2054 * than us since we can terminate without allocating more memory.
2055 */
2056 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2057 {
2058#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
2059 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2060#else
2061 rc2 = VINF_EM_NO_MEMORY;
2062#endif
2063 UPDATE_RC();
2064 if (rc == VINF_EM_NO_MEMORY)
2065 return rc;
2066 }
2067
2068 /*
2069 * If the virtual sync clock is still stopped, make TM restart it.
2070 */
2071 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2072 TMR3VirtualSyncFF(pVM, pVCpu);
2073
2074#ifdef DEBUG
2075 /*
2076 * Debug, pause the VM.
2077 */
2078 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2079 {
2080 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2081 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2082 return VINF_EM_SUSPEND;
2083 }
2084#endif
2085
2086 /* check that we got them all */
2087 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2088#ifdef VBOX_VMM_TARGET_ARMV8
2089 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_DBGF));
2090#elif defined(VBOX_VMM_TARGET_X86)
2091 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2092#else
2093# error "port me"
2094#endif
2095 }
2096
2097#undef UPDATE_RC
2098 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2099 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2100 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2101 return rc;
2102}
2103
2104
2105/**
2106 * Check if the preset execution time cap restricts guest execution scheduling.
2107 *
2108 * @returns true if allowed, false otherwise
2109 * @param pVM The cross context VM structure.
2110 * @param pVCpu The cross context virtual CPU structure.
2111 */
2112bool emR3IsExecutionAllowedSlow(PVM pVM, PVMCPU pVCpu)
2113{
2114 Assert(pVM->uCpuExecutionCap != 100);
2115 uint64_t cMsUserTime;
2116 uint64_t cMsKernelTime;
2117 if (RT_SUCCESS(RTThreadGetExecutionTimeMilli(&cMsKernelTime, &cMsUserTime)))
2118 {
2119 uint64_t const msTimeNow = RTTimeMilliTS();
2120 if (pVCpu->em.s.msTimeSliceStart + EM_TIME_SLICE < msTimeNow)
2121 {
2122 /* New time slice. */
2123 pVCpu->em.s.msTimeSliceStart = msTimeNow;
2124 pVCpu->em.s.cMsTimeSliceStartExec = cMsKernelTime + cMsUserTime;
2125 pVCpu->em.s.cMsTimeSliceExec = 0;
2126 }
2127 pVCpu->em.s.cMsTimeSliceExec = cMsKernelTime + cMsUserTime - pVCpu->em.s.cMsTimeSliceStartExec;
2128
2129 bool const fRet = pVCpu->em.s.cMsTimeSliceExec < (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100;
2130 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.msTimeSliceStart,
2131 pVCpu->em.s.cMsTimeSliceStartExec, pVCpu->em.s.cMsTimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2132 return fRet;
2133 }
2134 return true;
2135}
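
/*
 * Worked example for the execution cap logic above (illustrative; assumes the
 * EM_TIME_SLICE constant is the 100 ms slice length EM uses): with
 * uCpuExecutionCap = 40 the EMT may consume at most (100 * 40) / 100 = 40 ms of
 * combined kernel + user CPU time per time slice.  Once cMsTimeSliceExec reaches
 * that budget the function returns false and guest execution is throttled until
 * the next slice starts.
 */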
2136
2137
2138/**
2139 * Execute VM.
2140 *
2141 * This function is the main loop of the VM. The emulation thread
2142 * calls this function when the VM has been successfully constructed
2143 * and we're ready to execute the VM.
2144 *
2145 * Returning from this function means that the VM is turned off or
2146 * suspended (state already saved) and destruction is next in line.
2147 *
2148 * All interaction from other threads is done using forced actions
2149 * and signalling of the wait object.
2150 *
2151 * @returns VBox status code, informational status codes may indicate failure.
2152 * @param pVM The cross context VM structure.
2153 * @param pVCpu The cross context virtual CPU structure.
2154 */
2155VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2156{
2157 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2158 pVM,
2159 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2160 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2161 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2162 VM_ASSERT_EMT(pVM);
2163 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2164 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2165 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2166 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2167
2168 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2169 if (rc == 0)
2170 {
2171 /*
2172 * Start the virtual time.
2173 */
2174 TMR3NotifyResume(pVM, pVCpu);
2175
2176 /*
2177 * The Outer Main Loop.
2178 */
2179 bool fFFDone = false;
2180
2181 /* Reschedule right away to start in the right state. */
2182 rc = VINF_SUCCESS;
2183
2184 /* If resuming after a pause or a state load, restore the previous (halted or
2185 wait-for-SIPI) state so we don't start executing code right away; otherwise just reschedule. */
2186 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2187 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2188 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2189 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2190 else
2191 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2192 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2193
2194 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2195 for (;;)
2196 {
2197 /*
2198 * Before we can schedule anything (we're here because
2199 * scheduling is required) we must service any pending
2200 * forced actions to avoid any pending action causing
2201 * immediate rescheduling upon entering an inner loop.
2202 *
2203 * Do forced actions.
2204 */
2205 if ( !fFFDone
2206 && RT_SUCCESS(rc)
2207 && rc != VINF_EM_TERMINATE
2208 && rc != VINF_EM_OFF
2209 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2210 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2211 {
2212 rc = emR3ForcedActions(pVM, pVCpu, rc);
2213 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2214 }
2215 else if (fFFDone)
2216 fFFDone = false;
2217
2218#if defined(VBOX_STRICT) && defined(VBOX_VMM_TARGET_X86)
2219 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
2220#endif
2221
2222 /*
2223 * Now what to do?
2224 */
2225 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2226 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2227 switch (rc)
2228 {
2229 /*
2230 * Keep doing what we're currently doing.
2231 */
2232 case VINF_SUCCESS:
2233 break;
2234
2235 /*
2236 * Reschedule - to main execution engine (HM, NEM, IEM/REM).
2237 */
2238 case VINF_EM_RESCHEDULE_EXEC_ENGINE:
2239 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2240 if (!pVM->em.s.fIemExecutesAll)
2241 {
2242#ifdef VBOX_WITH_HWVIRT
2243 if (VM_IS_HM_ENABLED(pVM))
2244 {
2245 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
2246 {
2247 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2248 pVCpu->em.s.enmState = EMSTATE_HM;
2249 break;
2250 }
2251 }
2252 else
2253#endif
2254 if (VM_IS_NEM_ENABLED(pVM) && NEMR3CanExecuteGuest(pVM, pVCpu))
2255 {
2256 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2257 pVCpu->em.s.enmState = EMSTATE_NEM;
2258 break;
2259 }
2260 }
2261
2262 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_RECOMPILER)\n", enmOldState, EMSTATE_RECOMPILER));
2263 pVCpu->em.s.enmState = EMSTATE_RECOMPILER;
2264 break;
2265
2266 /*
2267 * Reschedule - to recompiled execution.
2268 */
2269 case VINF_EM_RESCHEDULE_REM:
2270 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2271 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n",
2272 enmOldState, EMSTATE_RECOMPILER));
2273 pVCpu->em.s.enmState = EMSTATE_RECOMPILER;
2274 break;
2275
2276 /*
2277 * Resume.
2278 */
2279 case VINF_EM_RESUME:
2280 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2281 /* Don't reschedule in the halted or wait-for-SIPI cases. */
2282 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2283 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2284 {
2285 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2286 break;
2287 }
2288 /* fall through and get scheduled. */
2289 RT_FALL_THRU();
2290
2291 /*
2292 * Reschedule.
2293 */
2294 case VINF_EM_RESCHEDULE:
2295 {
2296 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2297 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2298 pVCpu->em.s.enmState = enmState;
2299 break;
2300 }
2301
2302 /*
2303 * Halted.
2304 */
2305 case VINF_EM_HALT:
2306 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2307 pVCpu->em.s.enmState = EMSTATE_HALTED;
2308 break;
2309
2310 /*
2311 * Switch to the wait for SIPI state (application processor only)
2312 */
2313 case VINF_EM_WAIT_SIPI:
2314 Assert(pVCpu->idCpu != 0);
2315 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2316 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2317 break;
2318
2319
2320 /*
2321 * Suspend.
2322 */
2323 case VINF_EM_SUSPEND:
2324 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2325 Assert(enmOldState != EMSTATE_SUSPENDED);
2326 pVCpu->em.s.enmPrevState = enmOldState;
2327 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2328 break;
2329
2330 /*
2331 * Reset.
2332 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2333 */
2334 case VINF_EM_RESET:
2335 {
2336 if (pVCpu->idCpu == 0)
2337 {
2338 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2339 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2340 pVCpu->em.s.enmState = enmState;
2341 }
2342 else
2343 {
2344 /* All other VCPUs go into the wait for SIPI state. */
2345 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2346 }
2347 break;
2348 }
2349
2350 /*
2351 * Power Off.
2352 */
2353 case VINF_EM_OFF:
2354 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2355 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2356 TMR3NotifySuspend(pVM, pVCpu);
2357 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2358 return rc;
2359
2360 /*
2361 * Terminate the VM.
2362 */
2363 case VINF_EM_TERMINATE:
2364 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2365 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2366 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2367 TMR3NotifySuspend(pVM, pVCpu);
2368 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2369 return rc;
2370
2371
2372 /*
2373 * Out of memory, suspend the VM and stuff.
2374 */
2375 case VINF_EM_NO_MEMORY:
2376 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2377 Assert(enmOldState != EMSTATE_SUSPENDED);
2378 pVCpu->em.s.enmPrevState = enmOldState;
2379 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2380 TMR3NotifySuspend(pVM, pVCpu);
2381 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2382
2383 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2384 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2385 if (rc != VINF_EM_SUSPEND)
2386 {
2387 if (RT_SUCCESS_NP(rc))
2388 {
2389 AssertLogRelMsgFailed(("%Rrc\n", rc));
2390 rc = VERR_EM_INTERNAL_ERROR;
2391 }
2392 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2393 }
2394 return rc;
2395
2396 /*
2397 * Guest debug events.
2398 */
2399 case VINF_EM_DBG_STEPPED:
2400 case VINF_EM_DBG_STOP:
2401 case VINF_EM_DBG_EVENT:
2402 case VINF_EM_DBG_BREAKPOINT:
2403 case VINF_EM_DBG_STEP:
2404 if (enmOldState == EMSTATE_HM)
2405 {
2406 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2407 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2408 }
2409 else if (enmOldState == EMSTATE_NEM)
2410 {
2411 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2412 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2413 }
2414 else if (enmOldState == EMSTATE_RECOMPILER)
2415 {
2416 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RECOMPILER));
2417 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RECOMPILER;
2418 }
2419 else
2420 {
2421#ifdef VBOX_VMM_TARGET_ARMV8
2422 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2423 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM; /** @todo No IEM yet and this gets selected if enmOldState == EMSTATE_HALTED. */
2424#else
2425 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2426 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2427#endif
2428 }
2429 break;
2430
2431 /*
2432 * Hypervisor debug events.
2433 */
2434 case VINF_EM_DBG_HYPER_STEPPED:
2435 case VINF_EM_DBG_HYPER_BREAKPOINT:
2436 case VINF_EM_DBG_HYPER_ASSERTION:
2437 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2438 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2439 break;
2440
2441 /*
2442 * Triple fault.
2443 */
2444 case VINF_EM_TRIPLE_FAULT:
2445 if (!pVM->em.s.fGuruOnTripleFault)
2446 {
2447 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2448 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2449 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2450 continue;
2451 }
2452 /* Else fall through and trigger a guru. */
2453 RT_FALL_THRU();
2454
2455 case VERR_VMM_RING0_ASSERTION:
2456 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2457 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2458 break;
2459
2460 /*
2461 * Any error code showing up here other than the ones we
2462 * know and process above are considered to be FATAL.
2463 *
2464 * Unknown warnings and informational status codes are also
2465 * included in this.
2466 */
2467 default:
2468 if (RT_SUCCESS_NP(rc))
2469 {
2470 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2471 rc = VERR_EM_INTERNAL_ERROR;
2472 }
2473 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2474 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2475 break;
2476 }
2477
2478 /*
2479 * Act on state transition.
2480 */
2481 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2482 if (enmOldState != enmNewState)
2483 {
2484 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2485
2486 /* Clear MWait flags and the unhalt FF. */
2487 if ( enmOldState == EMSTATE_HALTED
2488 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2489 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2490 && ( enmNewState == EMSTATE_HM
2491 || enmNewState == EMSTATE_NEM
2492 || enmNewState == EMSTATE_RECOMPILER
2493 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2494 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2495 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2496 || enmNewState == EMSTATE_DEBUG_GUEST_RECOMPILER) )
2497 {
2498 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2499 {
2500 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2501 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2502 }
2503 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2504 {
2505 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2506 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2507 }
2508 }
2509 }
2510 else
2511 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2512
2513 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2514 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2515
2516 /*
2517 * Act on the new state.
2518 */
2519 switch (enmNewState)
2520 {
2521 /*
2522 * Execute hardware accelerated raw.
2523 */
2524 case EMSTATE_HM:
2525#ifdef VBOX_WITH_HWVIRT
2526 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2527#else
2528 AssertReleaseFailedStmt(rc = VERR_EM_INTERNAL_ERROR); /* Should never get here. */
2529#endif
2530 break;
2531
2532 /*
2533 * Execute using the native execution manager (NEM).
2534 */
2535 case EMSTATE_NEM:
2536 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2537 break;
2538
2539 /*
2540 * Execute recompiled.
2541 */
2542 case EMSTATE_RECOMPILER:
2543 rc = VBOXSTRICTRC_TODO(emR3RecompilerExecute(pVM, pVCpu, enmOldState == EMSTATE_HALTED, &fFFDone));
2544 Log2(("EMR3ExecuteVM: emR3RecompilerExecute -> %Rrc\n", rc));
2545 break;
2546
2547 /*
2548 * Execute in the interpreter.
2549 */
2550 case EMSTATE_IEM:
2551 {
2552#if 0 /* For comparing HM and IEM (@bugref{10464}). */
2553 PCPUMCTX const pCtx = &pVCpu->cpum.GstCtx;
2554 PCX86FXSTATE const pX87 = &pCtx->XState.x87;
2555 Log11(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
2556 "eip=%08x esp=%08x ebp=%08x eflags=%08x\n"
2557 "cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x\n"
2558 "fsw=%04x fcw=%04x ftw=%02x top=%u%s%s%s%s%s%s%s%s%s\n"
2559 "st0=%.10Rhxs st1=%.10Rhxs st2=%.10Rhxs st3=%.10Rhxs\n"
2560 "st4=%.10Rhxs st5=%.10Rhxs st6=%.10Rhxs st7=%.10Rhxs\n",
2561 pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
2562 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.u,
2563 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel, pCtx->fs.Sel, pCtx->gs.Sel,
2564 pX87->FSW, pX87->FCW, pX87->FTW, X86_FSW_TOP_GET(pX87->FSW),
2565 pX87->FSW & X86_FSW_ES ? " ES!" : "",
2566 pX87->FSW & X86_FSW_IE ? " IE" : "",
2567 pX87->FSW & X86_FSW_DE ? " DE" : "",
2568 pX87->FSW & X86_FSW_SF ? " SF" : "",
2569 pX87->FSW & X86_FSW_B ? " B!" : "",
2570 pX87->FSW & X86_FSW_C0 ? " C0" : "",
2571 pX87->FSW & X86_FSW_C1 ? " C1" : "",
2572 pX87->FSW & X86_FSW_C2 ? " C2" : "",
2573 pX87->FSW & X86_FSW_C3 ? " C3" : "",
2574 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(0)],
2575 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(1)],
2576 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(2)],
2577 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(3)],
2578 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(4)],
2579 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(5)],
2580 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(6)],
2581 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(7)]));
2582 DBGFR3DisasInstrCurrentLogInternal(pVCpu, NULL);
2583#endif
2584
2585 uint32_t cInstructions = 0;
2586#if 0 /* For testing purposes. */
2587 //STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2588 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2589 //STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2590 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_EXEC_ENGINE || rc == VINF_EM_RESCHEDULE_REM)
2591 rc = VINF_SUCCESS;
2592 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2593#endif
2594 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2595 if (pVM->em.s.fIemExecutesAll)
2596 {
2597 Assert(rc != VINF_EM_RESCHEDULE_REM);
2598 Assert(rc != VINF_EM_RESCHEDULE_EXEC_ENGINE);
2599#ifdef VBOX_HIGH_RES_TIMERS_HACK
2600 if (cInstructions < 2048)
2601 TMTimerPollVoid(pVM, pVCpu);
2602#endif
2603 }
2604 else if (rc == VINF_SUCCESS)
2605 rc = VINF_EM_RESCHEDULE; /* Need to check whether we can run in HM or NEM again. */
2606#ifdef VBOX_VMM_TARGET_X86
2607 if (rc != VINF_EM_EMULATE_SPLIT_LOCK)
2608 { /* likely */ }
2609 else
2610 rc = VBOXSTRICTRC_TODO(emR3ExecuteSplitLockInstruction(pVM, pVCpu));
2611#endif
2612 fFFDone = false;
2613 break;
2614 }
2615
2616 /*
2617 * Application processor execution halted until SIPI.
2618 */
2619 case EMSTATE_WAIT_SIPI:
2620 /* no break */
2621 /*
2622 * hlt - execution halted until interrupt.
2623 */
2624 case EMSTATE_HALTED:
2625 {
2626 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2627 /* If HM (or someone else) stores a pending interrupt in
2628 TRPM, it must be dispatched ASAP without any halting.
2629 Anything pending in TRPM has been accepted and the CPU
2630 should already be in the right state to receive it. */
2631 if (TRPMHasTrap(pVCpu))
2632 rc = VINF_EM_RESCHEDULE;
2633#ifdef VBOX_VMM_TARGET_X86
2634 /* MWAIT has a special extension where it's woken up when
2635 an interrupt is pending even when IF=0. */
2636 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2637 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2638 {
2639 rc = VMR3WaitHalted(pVM, pVCpu, 0 /*fFlags*/);
2640 if (rc == VINF_SUCCESS)
2641 {
2642 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2643 PDMApicUpdatePendingInterrupts(pVCpu);
2644
2645 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2646 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2647 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2648 {
2649 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2650 rc = VINF_EM_RESCHEDULE;
2651 }
2652
2653 }
2654 }
2655#endif
2656 else
2657 {
2658#ifdef VBOX_VMM_TARGET_ARMV8
2659 const uint32_t fWaitHalted = 0; /* WFI/WFE always return when an interrupt happens. */
2660#elif defined(VBOX_VMM_TARGET_X86)
2661 const uint32_t fWaitHalted = (CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF) ? 0 : VMWAITHALTED_F_IGNORE_IRQS;
2662#endif
2663 rc = VMR3WaitHalted(pVM, pVCpu, fWaitHalted);
2664
2665 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2666 check VMCPU_FF_UPDATE_APIC here. */
2667 if ( rc == VINF_SUCCESS
2668#ifdef VBOX_VMM_TARGET_ARMV8
2669 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI
2670 | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_INTERRUPT_IRQ
2671 | VMCPU_FF_VTIMER_ACTIVATED)
2672#elif defined(VBOX_VMM_TARGET_X86)
2673 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT)
2674#else
2675# error "port me"
2676#endif
2677 )
2678 {
2679 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2680 rc = VINF_EM_RESCHEDULE;
2681 }
2682 }
2683
2684 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2685 break;
2686 }
2687
2688 /*
2689 * Suspended - return to VM.cpp.
2690 */
2691 case EMSTATE_SUSPENDED:
2692 TMR3NotifySuspend(pVM, pVCpu);
2693 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2694 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2695 return VINF_EM_SUSPEND;
2696
2697 /*
2698 * Debugging in the guest.
2699 */
2700 case EMSTATE_DEBUG_GUEST_RAW:
2701 case EMSTATE_DEBUG_GUEST_HM:
2702 case EMSTATE_DEBUG_GUEST_NEM:
2703 case EMSTATE_DEBUG_GUEST_IEM:
2704 case EMSTATE_DEBUG_GUEST_RECOMPILER:
2705 TMR3NotifySuspend(pVM, pVCpu);
2706 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2707 TMR3NotifyResume(pVM, pVCpu);
2708 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2709 break;
2710
2711 /*
2712 * Debugging in the hypervisor.
2713 */
2714 case EMSTATE_DEBUG_HYPER:
2715 {
2716 TMR3NotifySuspend(pVM, pVCpu);
2717 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2718
2719 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2720 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2721 if (rc != VINF_SUCCESS)
2722 {
2723 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2724 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2725 else
2726 {
2727 /* switch to guru meditation mode */
2728 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2729 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2730 VMMR3FatalDump(pVM, pVCpu, rc);
2731 }
2732 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2733 return rc;
2734 }
2735
2736 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2737 TMR3NotifyResume(pVM, pVCpu);
2738 break;
2739 }
2740
2741 /*
2742 * Guru meditation takes place in the debugger.
2743 */
2744 case EMSTATE_GURU_MEDITATION:
2745 {
2746 TMR3NotifySuspend(pVM, pVCpu);
2747 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2748 VMMR3FatalDump(pVM, pVCpu, rc);
2749 emR3Debug(pVM, pVCpu, rc);
2750 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2751 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2752 return rc;
2753 }
2754
2755 /*
2756 * The states we don't expect here.
2757 */
2758 case EMSTATE_NONE:
2759 case EMSTATE_RAW_OBSOLETE:
2760 case EMSTATE_IEM_THEN_REM_OBSOLETE:
2761 case EMSTATE_TERMINATING:
2762 default:
2763 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2764 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2765 TMR3NotifySuspend(pVM, pVCpu);
2766 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2767 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2768 return VERR_EM_INTERNAL_ERROR;
2769 }
2770 } /* The Outer Main Loop */
2771 }
2772 else
2773 {
2774 /*
2775 * Fatal error.
2776 */
2777 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2778 TMR3NotifySuspend(pVM, pVCpu);
2779 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2780 VMMR3FatalDump(pVM, pVCpu, rc);
2781 emR3Debug(pVM, pVCpu, rc);
2782 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2783 /** @todo change the VM state! */
2784 return rc;
2785 }
2786
2787 /* not reached */
2788}
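
/*
 * Outer-loop recap: each iteration services pending forced actions
 * (emR3ForcedActions), translates the resulting status code into an EM state
 * (HM, NEM, RECOMPILER, IEM, HALTED, WAIT_SIPI, SUSPENDED, one of the debug
 * states or guru meditation), and then runs the corresponding inner handler
 * until it requests a reschedule or the VM is suspended, powered off or
 * terminated.
 */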
2789