VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@ 106518

Last change on this file since 106518 was 106375, checked in by vboxsync, 3 months ago

VMM/NEMR3Native-darwin-armv8.cpp: Implement support for breakpoints and single stepping in the VM debugger, bugref:10393 [missing file]

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 115.1 KB
1/* $Id: EM.cpp 106375 2024-10-16 13:41:24Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28/** @page pg_em EM - The Execution Monitor / Manager
29 *
30 * The Execution Monitor/Manager is responsible for running the VM, scheduling
31 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
32 * Interpreted), and keeping the CPU states in sync. The function
33 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
34 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
35 * emR3RemExecute).
36 *
37 * The interpreted execution is only used to avoid switching between
38 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
39 * The interpretation is thus implemented as part of EM.
40 *
41 * @see grp_em
42 */
43
44
45/*********************************************************************************************************************************
46* Header Files *
47*********************************************************************************************************************************/
48#define LOG_GROUP LOG_GROUP_EM
49#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET & interrupt injection */
50#include <VBox/vmm/em.h>
51#include <VBox/vmm/vmm.h>
52#include <VBox/vmm/selm.h>
53#include <VBox/vmm/trpm.h>
54#include <VBox/vmm/iem.h>
55#include <VBox/vmm/nem.h>
56#include <VBox/vmm/iom.h>
57#include <VBox/vmm/dbgf.h>
58#include <VBox/vmm/pgm.h>
59#include <VBox/vmm/apic.h>
60#include <VBox/vmm/tm.h>
61#include <VBox/vmm/mm.h>
62#include <VBox/vmm/ssm.h>
63#include <VBox/vmm/pdmapi.h>
64#include <VBox/vmm/pdmcritsect.h>
65#include <VBox/vmm/pdmqueue.h>
66#include <VBox/vmm/hm.h>
67#include "EMInternal.h"
68#include <VBox/vmm/vm.h>
69#include <VBox/vmm/uvm.h>
70#include <VBox/vmm/cpumdis.h>
71#include <VBox/dis.h>
72#include <VBox/err.h>
73#include "VMMTracing.h"
74
75#include <iprt/asm.h>
76#include <iprt/string.h>
77#include <iprt/stream.h>
78#include <iprt/thread.h>
79
80#include "EMInline.h"
81
82
83/*********************************************************************************************************************************
84* Internal Functions *
85*********************************************************************************************************************************/
86static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
87static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
88#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
89static const char *emR3GetStateName(EMSTATE enmState);
90#endif
91static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
92
93
94/**
95 * Initializes the EM.
96 *
97 * @returns VBox status code.
98 * @param pVM The cross context VM structure.
99 */
100VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
101{
102 LogFlow(("EMR3Init\n"));
103 /*
104 * Assert alignment and sizes.
105 */
106 AssertCompileMemberAlignment(VM, em.s, 32);
107 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
108 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
109 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
110
111 /*
112 * Init the structure.
113 */
114 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
115 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
116
117 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll,
118#if defined(RT_ARCH_ARM64) && defined(RT_OS_DARWIN) && !defined(VBOX_VMM_TARGET_ARMV8)
119 true
120#else
121 false
122#endif
123 );
124 AssertLogRelRCReturn(rc, rc);
125
126 bool fEnabled;
127 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
128 AssertLogRelRCReturn(rc, rc);
129 pVM->em.s.fGuruOnTripleFault = !fEnabled;
130 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
131 {
132 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
133 pVM->em.s.fGuruOnTripleFault = true;
134 }
135
136 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
137
138 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
139 * Whether to try correlate exit history in any context, detect hot spots and
140 * try optimize these using IEM if there are other exits close by. This
141 * overrides the context specific settings. */
142 bool fExitOptimizationEnabled = true;
143 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
144 AssertLogRelRCReturn(rc, rc);
145
146 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
147 * Whether to optimize exits in ring-0. Setting this to false will also disable
148 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
149 * capabilities of the host kernel, this optimization may be unavailable. */
150 bool fExitOptimizationEnabledR0 = true;
151 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
152 AssertLogRelRCReturn(rc, rc);
153 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
154
155 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
156 * Whether to optimize exits in ring-0 when preemption is disable (or preemption
157 * hooks are in effect). */
158 /** @todo change the default to true here */
159 bool fExitOptimizationEnabledR0PreemptDisabled = true;
160 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
161 AssertLogRelRCReturn(rc, rc);
162 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
163
164 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
165 * Maximum number of instructions to let EMHistoryExec execute in one go. */
166 uint16_t cHistoryExecMaxInstructions = 8192;
167 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
168 AssertLogRelRCReturn(rc, rc);
169 if (cHistoryExecMaxInstructions < 16)
170 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
171
172 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
173 * Maximum number of instructions between exits during probing. */
174 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
175#ifdef RT_OS_WINDOWS
176 if (VM_IS_NEM_ENABLED(pVM))
177 cHistoryProbeMaxInstructionsWithoutExit = 32;
178#endif
179 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
180 cHistoryProbeMaxInstructionsWithoutExit);
181 AssertLogRelRCReturn(rc, rc);
182 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
183 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
184 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 16");
185
186 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
187 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
188 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
189 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
190 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
191 cHistoryProbeMinInstructions);
192 AssertLogRelRCReturn(rc, rc);
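/* Worked example with the defaults above: cHistoryProbeMaxInstructionsWithoutExit
 * defaults to 24 (32 for NEM on Windows), so the probe minimum defaults to
 * (24 + 1) * 3 = 75 instructions (respectively (32 + 1) * 3 = 99). */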
193
194 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
195 {
196 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
197 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
198 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
199 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
200 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
201 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
202 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
203 }
204
205#ifdef VBOX_WITH_IEM_RECOMPILER
206 /** @cfgm{/EM/IemRecompiled, bool, true}
207 * Whether IEM bulk execution is recompiled or interpreted. */
208 rc = CFGMR3QueryBoolDef(pCfgEM, "IemRecompiled", &pVM->em.s.fIemRecompiled, true);
209 AssertLogRelRCReturn(rc, rc);
210#endif
211
212 /*
213 * Saved state.
214 */
215 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
216 NULL, NULL, NULL,
217 NULL, emR3Save, NULL,
218 NULL, emR3Load, NULL);
219 if (RT_FAILURE(rc))
220 return rc;
221
222 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
223 {
224 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
225
226 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
227 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
228 pVCpu->em.s.msTimeSliceStart = 0; /* paranoia */
229 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
230
231# define EM_REG_COUNTER(a, b, c) \
232 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
233 AssertRC(rc);
234
235# define EM_REG_COUNTER_USED(a, b, c) \
236 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
237 AssertRC(rc);
238
239# define EM_REG_PROFILE(a, b, c) \
240 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
241 AssertRC(rc);
242
243# define EM_REG_PROFILE_ADV(a, b, c) \
244 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
245 AssertRC(rc);
246
247 /*
248 * Statistics.
249 */
250#ifdef VBOX_WITH_STATISTICS
251 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
252 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions end to IEM in ring-3.");
253
254 /* these should be considered for release statistics. */
255 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
256 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
257 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
258#endif
259 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
260 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times enmR3HMExecute is called.");
261#ifdef VBOX_WITH_STATISTICS
262 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
263 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
264 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
265#endif
266 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
267 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times enmR3NEMExecute is called.");
268#ifdef VBOX_WITH_STATISTICS
269 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
270#endif
271
272 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
273 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
274 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
275 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RecompilerExecute (excluding FFs).");
276
277 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
278
279 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
280 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
281 AssertRC(rc);
282
283 /* History record statistics */
284 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
285 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
286 AssertRC(rc);
287
288 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
289 {
290 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
291 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
292 AssertRC(rc);
293 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
294 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
295 AssertRC(rc);
296 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
297 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", idCpu, iStep);
298 AssertRC(rc);
299 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
300 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
301 AssertRC(rc);
302 }
303
304 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
305 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
306 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
307 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
308 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
309 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
310 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
311 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
312 }
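/* The statistics registered in this loop can be inspected on a running VM; the exact
 * command shape is assumed here, but it is roughly:
 *     VBoxManage debugvm "MyVM" statistics --pattern "/EM/*"
 * with "MyVM" as a placeholder VM name. */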
313
314 emR3InitDbg(pVM);
315 return VINF_SUCCESS;
316}
317
318
319/**
320 * Called when a VM initialization stage is completed.
321 *
322 * @returns VBox status code.
323 * @param pVM The cross context VM structure.
324 * @param enmWhat The initialization state that was completed.
325 */
326VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
327{
328 if (enmWhat == VMINITCOMPLETED_RING0)
329 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
330 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
331 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
332 return VINF_SUCCESS;
333}
334
335
336/**
337 * Applies relocations to data and code managed by this
338 * component. This function will be called at init and
339 * whenever the VMM needs to relocate itself inside the GC.
340 *
341 * @param pVM The cross context VM structure.
342 */
343VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
344{
345 LogFlow(("EMR3Relocate\n"));
346 RT_NOREF(pVM);
347}
348
349
350/**
351 * Reset the EM state for a CPU.
352 *
353 * Called by EMR3Reset and hot plugging.
354 *
355 * @param pVCpu The cross context virtual CPU structure.
356 */
357VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
358{
359 /* Reset scheduling state. */
360 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
361
362 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
363 out of the HALTED state here so that enmPrevState doesn't end up as
364 HALTED when EMR3Execute returns. */
365 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
366 {
367 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
368 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
369 }
370}
371
372
373/**
374 * Reset notification.
375 *
376 * @param pVM The cross context VM structure.
377 */
378VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
379{
380 Log(("EMR3Reset: \n"));
381 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
382 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
383}
384
385
386/**
387 * Terminates the EM.
388 *
389 * Termination means cleaning up and freeing all resources;
390 * the VM itself is at this point powered off or suspended.
391 *
392 * @returns VBox status code.
393 * @param pVM The cross context VM structure.
394 */
395VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
396{
397 RT_NOREF(pVM);
398 return VINF_SUCCESS;
399}
400
401
402/**
403 * Execute state save operation.
404 *
405 * @returns VBox status code.
406 * @param pVM The cross context VM structure.
407 * @param pSSM SSM operation handle.
408 */
409static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
410{
411 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
412 {
413 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
414
415 SSMR3PutBool(pSSM, false /*fForceRAW*/);
416
417 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
418 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
419 SSMR3PutU32(pSSM,
420 pVCpu->em.s.enmPrevState == EMSTATE_NONE
421 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED
422 || pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
423 ? pVCpu->em.s.enmPrevState : EMSTATE_NONE);
424
425 /* Save mwait state. */
426 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
427 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
428 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
429 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
430 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
431 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
432 AssertRCReturn(rc, rc);
433 }
434 return VINF_SUCCESS;
435}
436
437
438/**
439 * Execute state load operation.
440 *
441 * @returns VBox status code.
442 * @param pVM The cross context VM structure.
443 * @param pSSM SSM operation handle.
444 * @param uVersion Data layout version.
445 * @param uPass The data pass.
446 */
447static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
448{
449 /*
450 * Validate version.
451 */
452 if ( uVersion > EM_SAVED_STATE_VERSION
453 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
454 {
455 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
456 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
457 }
458 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
459
460 /*
461 * Load the saved state.
462 */
463 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
464 {
465 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
466
467 bool fForceRAWIgnored;
468 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
469 AssertRCReturn(rc, rc);
470
471 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
472 {
473 /* We are only interested in two enmPrevState values for use when
474 EMR3ExecuteVM is called.
475 Since ~r157540, only these two and EMSTATE_NONE are saved. */
476 SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
477 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
478 if ( pVCpu->em.s.enmPrevState != EMSTATE_WAIT_SIPI
479 && pVCpu->em.s.enmPrevState != EMSTATE_HALTED)
480 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
481
482 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
483 }
484 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
485 {
486 /* Load mwait state. */
487 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
488 AssertRCReturn(rc, rc);
489 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
490 AssertRCReturn(rc, rc);
491 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
492 AssertRCReturn(rc, rc);
493 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
494 AssertRCReturn(rc, rc);
495 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
496 AssertRCReturn(rc, rc);
497 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
498 AssertRCReturn(rc, rc);
499 }
500 }
501 return VINF_SUCCESS;
502}
503
504
505/**
506 * Argument packet for emR3SetExecutionPolicy.
507 */
508struct EMR3SETEXECPOLICYARGS
509{
510 EMEXECPOLICY enmPolicy;
511 bool fEnforce;
512};
513
514
515/**
516 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
517 */
518static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
519{
520 /*
521 * Only the first CPU changes the variables.
522 */
523 if (pVCpu->idCpu == 0)
524 {
525 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
526 switch (pArgs->enmPolicy)
527 {
528 case EMEXECPOLICY_IEM_ALL:
529 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
530
531 /* For making '.alliem 1' useful during debugging, transition the
532 EMSTATE_DEBUG_GUEST_XXX to EMSTATE_DEBUG_GUEST_IEM. */
533 for (VMCPUID i = 0; i < pVM->cCpus; i++)
534 {
535 PVMCPU pVCpuX = pVM->apCpusR3[i];
536 switch (pVCpuX->em.s.enmState)
537 {
538 case EMSTATE_DEBUG_GUEST_RECOMPILER:
539 if (pVM->em.s.fIemRecompiled)
540 break;
541 RT_FALL_THROUGH();
542 case EMSTATE_DEBUG_GUEST_RAW:
543 case EMSTATE_DEBUG_GUEST_HM:
544 case EMSTATE_DEBUG_GUEST_NEM:
545 Log(("EM: idCpu=%u: %s -> EMSTATE_DEBUG_GUEST_IEM\n", i, emR3GetStateName(pVCpuX->em.s.enmState) ));
546 pVCpuX->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
547 break;
548 case EMSTATE_DEBUG_GUEST_IEM:
549 default:
550 break;
551 }
552 }
553 break;
554
555 case EMEXECPOLICY_IEM_RECOMPILED:
556 pVM->em.s.fIemRecompiled = pArgs->fEnforce;
557 break;
558
559 default:
560 AssertFailedReturn(VERR_INVALID_PARAMETER);
561 }
562 Log(("EM: Set execution policy: fIemExecutesAll=%RTbool fIemRecompiled=%RTbool\n",
563 pVM->em.s.fIemExecutesAll, pVM->em.s.fIemRecompiled));
564 }
565
566 /*
567 * Force rescheduling if in HM, NEM, IEM/interpreter or IEM/recompiler.
568 */
569 Assert(pVCpu->em.s.enmState != EMSTATE_RAW_OBSOLETE);
570 return pVCpu->em.s.enmState == EMSTATE_HM
571 || pVCpu->em.s.enmState == EMSTATE_NEM
572 || pVCpu->em.s.enmState == EMSTATE_IEM
573 || pVCpu->em.s.enmState == EMSTATE_RECOMPILER
574 ? VINF_EM_RESCHEDULE
575 : VINF_SUCCESS;
576}
577
578
579/**
580 * Changes an execution scheduling policy parameter.
581 *
582 * This is used to enable or disable raw-mode / hardware-virtualization
583 * execution of user and supervisor code.
584 *
585 * @returns VINF_SUCCESS on success.
586 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
587 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
588 *
589 * @param pUVM The user mode VM handle.
590 * @param enmPolicy The scheduling policy to change.
591 * @param fEnforce Whether to enforce the policy or not.
592 */
593VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
594{
595 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
596 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
597 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
598
599 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
600 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
601}
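/* Minimal usage sketch (nothing beyond the declaration above is assumed): to force
 * all guest execution into IEM for the whole VM a caller would do:
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 */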
602
603
604/**
605 * Queries an execution scheduling policy parameter.
606 *
607 * @returns VBox status code
608 * @param pUVM The user mode VM handle.
609 * @param enmPolicy The scheduling policy to query.
610 * @param pfEnforced Where to return the current value.
611 */
612VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
613{
614 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
615 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
616 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
617 PVM pVM = pUVM->pVM;
618 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
619
620 /* No need to bother EMTs with a query. */
621 switch (enmPolicy)
622 {
623 case EMEXECPOLICY_IEM_ALL:
624 *pfEnforced = pVM->em.s.fIemExecutesAll;
625 break;
626 case EMEXECPOLICY_IEM_RECOMPILED:
627 *pfEnforced = pVM->em.s.fIemRecompiled;
628 break;
629 default:
630 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
631 }
632
633 return VINF_SUCCESS;
634}
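/* Matching query sketch (again only the declaration above is assumed):
 *     bool fIemAll = false;
 *     int rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fIemAll);
 */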
635
636
637/**
638 * Queries the main execution engine of the VM.
639 *
640 * @returns VBox status code
641 * @param pUVM The user mode VM handle.
642 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
643 */
644VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
645{
646 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
647 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
648
649 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
650 PVM pVM = pUVM->pVM;
651 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
652
653 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
654 return VINF_SUCCESS;
655}
656
657
658/**
659 * Raise a fatal error.
660 *
661 * Safely terminate the VM with full state report and stuff. This function
662 * will naturally never return.
663 *
664 * @param pVCpu The cross context virtual CPU structure.
665 * @param rc VBox status code.
666 */
667VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
668{
669 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
670 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
671}
672
673
674#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
675/**
676 * Gets the EM state name.
677 *
678 * @returns pointer to read only state name,
679 * @param enmState The state.
680 */
681static const char *emR3GetStateName(EMSTATE enmState)
682{
683 switch (enmState)
684 {
685 case EMSTATE_NONE: return "EMSTATE_NONE";
686 case EMSTATE_RAW_OBSOLETE: return "EMSTATE_RAW_OBSOLETE";
687 case EMSTATE_HM: return "EMSTATE_HM";
688 case EMSTATE_IEM: return "EMSTATE_IEM";
689 case EMSTATE_RECOMPILER: return "EMSTATE_RECOMPILER";
690 case EMSTATE_HALTED: return "EMSTATE_HALTED";
691 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
692 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
693 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
694 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
695 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
696 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
697 case EMSTATE_DEBUG_GUEST_RECOMPILER: return "EMSTATE_DEBUG_GUEST_RECOMPILER";
698 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
699 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
700 case EMSTATE_IEM_THEN_REM_OBSOLETE: return "EMSTATE_IEM_THEN_REM_OBSOLETE";
701 case EMSTATE_NEM: return "EMSTATE_NEM";
702 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
703 default: return "Unknown!";
704 }
705}
706#endif /* LOG_ENABLED || VBOX_STRICT */
707
708#if !defined(VBOX_VMM_TARGET_ARMV8)
709
710/**
711 * Handle pending ring-3 I/O port write.
712 *
713 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
714 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
715 *
716 * @returns Strict VBox status code.
717 * @param pVM The cross context VM structure.
718 * @param pVCpu The cross context virtual CPU structure.
719 */
720VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
721{
722 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
723
724 /* Get and clear the pending data. */
725 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
726 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
727 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
728 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
729 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
730
731 /* Assert sanity. */
732 switch (cbValue)
733 {
734 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
735 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
736 case 4: break;
737 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
738 }
739 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
740
741 /* Do the work.*/
742 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
743 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
744 if (IOM_SUCCESS(rcStrict))
745 {
746 pVCpu->cpum.GstCtx.rip += cbInstr;
747 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
748 }
749 return rcStrict;
750}
751
752
753/**
754 * Handle pending ring-3 I/O port read.
755 *
756 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
757 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
758 *
759 * @returns Strict VBox status code.
760 * @param pVM The cross context VM structure.
761 * @param pVCpu The cross context virtual CPU structure.
762 */
763VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
764{
765 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
766
767 /* Get and clear the pending data. */
768 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
769 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
770 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
771 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
772
773 /* Assert sanity. */
774 switch (cbValue)
775 {
776 case 1: break;
777 case 2: break;
778 case 4: break;
779 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
780 }
781 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
782 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
783
784 /* Do the work.*/
785 uint32_t uValue = 0;
786 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
787 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
788 if (IOM_SUCCESS(rcStrict))
789 {
790 if (cbValue == 4)
791 pVCpu->cpum.GstCtx.rax = uValue;
792 else if (cbValue == 2)
793 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
794 else
795 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
796 pVCpu->cpum.GstCtx.rip += cbInstr;
797 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
798 }
799 return rcStrict;
800}
801
802
803/**
804 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
805 * Worker for emR3ExecuteSplitLockInstruction}
806 */
807static DECLCALLBACK(VBOXSTRICTRC) emR3ExecuteSplitLockInstructionRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
808{
809 /* Only execute on the specified EMT. */
810 if (pVCpu == (PVMCPU)pvUser)
811 {
812 LogFunc(("\n"));
813 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
814 LogFunc(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
815 if (rcStrict == VINF_IEM_RAISED_XCPT)
816 rcStrict = VINF_SUCCESS;
817 return rcStrict;
818 }
819 RT_NOREF(pVM);
820 return VINF_SUCCESS;
821}
822
823
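/* Background example (illustrative, assuming 64-byte cache lines): a split lock is a
 * LOCK-prefixed access whose operand straddles a cache-line boundary, e.g. a
 * "lock add dword [rbx+62], 1" with RBX line aligned touches bytes 62..65 and thus
 * two cache lines, raising #AC when the host has split-lock detection enabled. */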
824/**
825 * Handle an instruction causing a split cacheline lock access in SMP VMs.
826 *
827 * Generally we only get here if the host has split-lock detection enabled and
828 * this caused an \#AC because of something the guest did. If we interpret the
829 * instruction as-is, we'll likely just repeat the split-lock access and
830 * possibly be killed, get a SIGBUS, or trigger a warning followed by extra MSR
831 * changes on context switching (costs a tiny bit). Assuming these \#ACs are
832 * rare to non-existing, we'll do a rendezvous of all EMTs and tell IEM to
833 * disregard the lock prefix when emulating the instruction.
834 *
835 * Yes, we could probably modify the MSR (or MSRs) controlling the detection
836 * feature when entering guest context, but the support for the feature isn't a
837 * 100% given and we'll need the debug-only supdrvOSMsrProberRead and
838 * supdrvOSMsrProberWrite functionality from SUPDrv.cpp to safely detect it.
839 * Thus the approach is to just deal with the spurious \#ACs first and maybe add
840 * proper detection to SUPDrv later if we find it necessary.
841 *
842 * @see @bugref{10052}
843 *
844 * @returns Strict VBox status code.
845 * @param pVM The cross context VM structure.
846 * @param pVCpu The cross context virtual CPU structure.
847 */
848VBOXSTRICTRC emR3ExecuteSplitLockInstruction(PVM pVM, PVMCPU pVCpu)
849{
850 LogFunc(("\n"));
851 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
852}
853
854#endif /* VBOX_VMM_TARGET_ARMV8 */
855
856/**
857 * Debug loop.
858 *
859 * @returns VBox status code for EM.
860 * @param pVM The cross context VM structure.
861 * @param pVCpu The cross context virtual CPU structure.
862 * @param rc Current EM VBox status code.
863 */
864static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
865{
866 for (;;)
867 {
868 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
869 const VBOXSTRICTRC rcLast = rc;
870
871 /*
872 * Debug related RC.
873 */
874 switch (VBOXSTRICTRC_VAL(rc))
875 {
876 /*
877 * Single step an instruction.
878 */
879 case VINF_EM_DBG_STEP:
880 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
881 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
882 AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
883#if !defined(VBOX_VMM_TARGET_ARMV8)
884 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
885 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
886 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
887 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
888 else
889 {
890 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
891 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
892 rc = VINF_EM_DBG_STEPPED;
893 }
894
895 if (rc != VINF_EM_EMULATE_SPLIT_LOCK)
896 { /* likely */ }
897 else
898 {
899 rc = emR3ExecuteSplitLockInstruction(pVM, pVCpu);
900 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
901 rc = VINF_EM_DBG_STEPPED;
902 }
903#else
904 AssertMsg(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM,
905 ("%u\n", pVCpu->em.s.enmState));
906 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
907#endif
908 break;
909
910 /*
911 * Simple events: stepped, breakpoint, stop/assertion.
912 */
913 case VINF_EM_DBG_STEPPED:
914 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
915 break;
916
917 case VINF_EM_DBG_BREAKPOINT:
918 rc = DBGFR3BpHit(pVM, pVCpu);
919 break;
920
921 case VINF_EM_DBG_STOP:
922 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
923 break;
924
925 case VINF_EM_DBG_EVENT:
926 rc = DBGFR3EventHandlePending(pVM, pVCpu);
927 break;
928
929 case VINF_EM_DBG_HYPER_STEPPED:
930 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
931 break;
932
933 case VINF_EM_DBG_HYPER_BREAKPOINT:
934 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
935 break;
936
937 case VINF_EM_DBG_HYPER_ASSERTION:
938 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
939 RTLogFlush(NULL);
940 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
941 break;
942
943 /*
944 * Guru meditation.
945 */
946 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
947 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
948 break;
949 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
950 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
951 break;
952
953 default: /** @todo don't use default for guru, but make special errors code! */
954 {
955 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
956 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
957 break;
958 }
959 }
960
961 /*
962 * Process the result.
963 */
964 switch (VBOXSTRICTRC_VAL(rc))
965 {
966 /*
967 * Continue the debugging loop.
968 */
969 case VINF_EM_DBG_STEP:
970 case VINF_EM_DBG_STOP:
971 case VINF_EM_DBG_EVENT:
972 case VINF_EM_DBG_STEPPED:
973 case VINF_EM_DBG_BREAKPOINT:
974 case VINF_EM_DBG_HYPER_STEPPED:
975 case VINF_EM_DBG_HYPER_BREAKPOINT:
976 case VINF_EM_DBG_HYPER_ASSERTION:
977 break;
978
979 /*
980 * Resuming execution (in some form) has to be done here if we got
981 * a hypervisor debug event.
982 */
983 case VINF_SUCCESS:
984 case VINF_EM_RESUME:
985 case VINF_EM_SUSPEND:
986 case VINF_EM_RESCHEDULE:
987 case VINF_EM_RESCHEDULE_REM:
988 case VINF_EM_HALT:
989 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
990 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
991 if (rc == VINF_SUCCESS)
992 rc = VINF_EM_RESCHEDULE;
993 return rc;
994
995 /*
996 * The debugger isn't attached.
997 * We'll simply turn the thing off since that's the easiest thing to do.
998 */
999 case VERR_DBGF_NOT_ATTACHED:
1000 switch (VBOXSTRICTRC_VAL(rcLast))
1001 {
1002 case VINF_EM_DBG_HYPER_STEPPED:
1003 case VINF_EM_DBG_HYPER_BREAKPOINT:
1004 case VINF_EM_DBG_HYPER_ASSERTION:
1005 case VERR_TRPM_PANIC:
1006 case VERR_TRPM_DONT_PANIC:
1007 case VERR_VMM_RING0_ASSERTION:
1008 case VERR_VMM_HYPER_CR3_MISMATCH:
1009 case VERR_VMM_RING3_CALL_DISABLED:
1010 return rcLast;
1011 }
1012 return VINF_EM_OFF;
1013
1014 /*
1015 * Status codes terminating the VM in one or another sense.
1016 */
1017 case VINF_EM_TERMINATE:
1018 case VINF_EM_OFF:
1019 case VINF_EM_RESET:
1020 case VINF_EM_NO_MEMORY:
1021 case VINF_EM_RAW_STALE_SELECTOR:
1022 case VINF_EM_RAW_IRET_TRAP:
1023 case VERR_TRPM_PANIC:
1024 case VERR_TRPM_DONT_PANIC:
1025 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
1026 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
1027 case VERR_VMM_RING0_ASSERTION:
1028 case VERR_VMM_HYPER_CR3_MISMATCH:
1029 case VERR_VMM_RING3_CALL_DISABLED:
1030 case VERR_INTERNAL_ERROR:
1031 case VERR_INTERNAL_ERROR_2:
1032 case VERR_INTERNAL_ERROR_3:
1033 case VERR_INTERNAL_ERROR_4:
1034 case VERR_INTERNAL_ERROR_5:
1035 case VERR_IPE_UNEXPECTED_STATUS:
1036 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1037 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1038 return rc;
1039
1040 /*
1041 * The rest is unexpected, and will keep us here.
1042 */
1043 default:
1044 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1045 break;
1046 }
1047 } /* debug for ever */
1048}
1049
1050
1051/**
1052 * Executes recompiled code.
1053 *
1054 * This function contains the recompiler version of the inner
1055 * execution loop (the outer loop being in EMR3ExecuteVM()).
1056 *
1057 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1058 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1059 *
1060 * @param pVM The cross context VM structure.
1061 * @param pVCpu The cross context virtual CPU structure.
1062 * @param fWasHalted Set if we're coming out of a CPU HALT state.
1063 * @param pfFFDone Where to store an indicator telling whether or not
1064 * FFs were done before returning.
1065 *
1066 */
1067static VBOXSTRICTRC emR3RecompilerExecute(PVM pVM, PVMCPU pVCpu, bool fWasHalted, bool *pfFFDone)
1068{
1069 STAM_REL_PROFILE_START(&pVCpu->em.s.StatREMTotal, a);
1070#ifdef VBOX_VMM_TARGET_ARMV8
1071 LogFlow(("emR3RecompilerExecute/%u: (pc=%RGv)\n", pVCpu->idCpu, (RTGCPTR)pVCpu->cpum.GstCtx.Pc.u64));
1072#else
1073 LogFlow(("emR3RecompilerExecute/%u: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));
1074#endif
1075
1076 /*
1077 * Loop till we get a forced action which returns anything but VINF_SUCCESS.
1078 */
1079 *pfFFDone = false;
1080 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1081 for (;;)
1082 {
1083#ifdef LOG_ENABLED
1084# if defined(VBOX_VMM_TARGET_ARMV8)
1085 Log3(("EM: pc=%08x\n", CPUMGetGuestFlatPC(pVCpu)));
1086# else
1087 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1088 Log(("EMR%d: %04X:%08RX64 RSP=%08RX64 IF=%d CR0=%x eflags=%x\n", CPUMGetGuestCPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel,
1089 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF,
1090 (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1091 else
1092 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1093# endif
1094#endif
1095
1096 /*
1097 * Execute.
1098 */
1099 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1100 {
1101 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1102#ifdef VBOX_WITH_IEM_RECOMPILER
1103 if (pVM->em.s.fIemRecompiled)
1104 rcStrict = IEMExecRecompiler(pVM, pVCpu, fWasHalted);
1105 else
1106#endif
1107 rcStrict = IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/);
1108 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1109 }
1110 else
1111 {
1112 /* Give up this time slice; virtual time continues */
1113 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1114 RTThreadSleep(5);
1115 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1116 rcStrict = VINF_SUCCESS;
1117 }
1118
1119 /*
1120 * Deal with high priority post execution FFs before doing anything
1121 * else. Sync back the state and leave the lock to be on the safe side.
1122 */
1123 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1124 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1125 rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, rcStrict);
1126
1127 /*
1128 * Process the returned status code.
1129 */
1130 if (rcStrict != VINF_SUCCESS)
1131 {
1132#ifndef VBOX_VMM_TARGET_ARMV8
1133 if (rcStrict == VINF_EM_EMULATE_SPLIT_LOCK)
1134 rcStrict = emR3ExecuteSplitLockInstruction(pVM, pVCpu);
1135#endif
1136 if (rcStrict != VINF_SUCCESS)
1137 {
1138#if 0
1139 if (RT_LIKELY(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST))
1140 break;
1141 /* Fatal error: */
1142#endif
1143 break;
1144 }
1145 }
1146
1147
1148 /*
1149 * Check and execute forced actions.
1150 *
1151 * Sync back the VM state and leave the lock before calling any of
1152 * these, you never know what's going to happen here.
1153 */
1154#ifdef VBOX_HIGH_RES_TIMERS_HACK
1155 TMTimerPollVoid(pVM, pVCpu);
1156#endif
1157 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1158 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1159 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1160 {
1161 rcStrict = emR3ForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
1162 VBOXVMM_EM_FF_ALL_RET(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1163 if ( rcStrict != VINF_SUCCESS
1164 && rcStrict != VINF_EM_RESCHEDULE_REM)
1165 {
1166 *pfFFDone = true;
1167 break;
1168 }
1169 }
1170
1171 /*
1172 * Check if we can switch back to the main execution engine now.
1173 */
1174#if !defined(VBOX_VMM_TARGET_ARMV8)
1175 if (VM_IS_HM_ENABLED(pVM))
1176 {
1177 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1178 {
1179 *pfFFDone = true;
1180 rcStrict = VINF_EM_RESCHEDULE_EXEC_ENGINE;
1181 break;
1182 }
1183 }
1184 else
1185#endif
1186 if (VM_IS_NEM_ENABLED(pVM))
1187 {
1188 if (NEMR3CanExecuteGuest(pVM, pVCpu))
1189 {
1190 *pfFFDone = true;
1191 rcStrict = VINF_EM_RESCHEDULE_EXEC_ENGINE;
1192 break;
1193 }
1194 }
1195
1196#ifdef VBOX_WITH_IEM_RECOMPILER
1197 fWasHalted = false;
1198#else
1199 RT_NOREF(fWasHalted);
1200#endif
1201 } /* The Inner Loop, recompiled execution mode version. */
1202
1203 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatREMTotal, a);
1204 return rcStrict;
1205}
1206
1207
1208/**
1209 * Decides whether to execute HM, NEM, IEM/interpreter or IEM/recompiler.
1210 *
1211 * @returns new EM state
1212 * @param pVM The cross context VM structure.
1213 * @param pVCpu The cross context virtual CPU structure.
1214 */
1215EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1216{
1217 /*
1218 * We stay in the wait for SIPI state unless explicitly told otherwise.
1219 */
1220 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1221 return EMSTATE_WAIT_SIPI;
1222
1223 /*
1224 * Execute everything in IEM?
1225 */
1226 if ( pVM->em.s.fIemExecutesAll
1227 || VM_IS_EXEC_ENGINE_IEM(pVM))
1228#ifdef VBOX_WITH_IEM_RECOMPILER
1229 return pVM->em.s.fIemRecompiled ? EMSTATE_RECOMPILER : EMSTATE_IEM;
1230#else
1231 return EMSTATE_IEM;
1232#endif
1233
1234#if !defined(VBOX_VMM_TARGET_ARMV8)
1235 if (VM_IS_HM_ENABLED(pVM))
1236 {
1237 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1238 return EMSTATE_HM;
1239 }
1240 else
1241#endif
1242 if (NEMR3CanExecuteGuest(pVM, pVCpu))
1243 return EMSTATE_NEM;
1244
1245 /*
1246 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1247 * turns off monitoring features essential for raw mode!
1248 */
1249#ifdef VBOX_WITH_IEM_RECOMPILER
1250 return pVM->em.s.fIemRecompiled ? EMSTATE_RECOMPILER : EMSTATE_IEM;
1251#else
1252 return EMSTATE_IEM;
1253#endif
1254}
1255
1256
1257/**
1258 * Executes all high priority post execution force actions.
1259 *
1260 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1261 * fatal error status code.
1262 *
1263 * @param pVM The cross context VM structure.
1264 * @param pVCpu The cross context virtual CPU structure.
1265 * @param rc The current strict VBox status code rc.
1266 */
1267VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1268{
1269 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1270
1271 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1272 PDMCritSectBothFF(pVM, pVCpu);
1273
1274#if !defined(VBOX_VMM_TARGET_ARMV8)
1275 /* Update CR3 (Nested Paging case for HM). */
1276 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1277 {
1278 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1279 int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1280 if (RT_FAILURE(rc2))
1281 return rc2;
1282 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1283 }
1284#endif
1285
1286 /* IEM has pending work (typically memory write after INS instruction). */
1287 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1288 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1289
1290 /* IOM has pending work (committing an I/O or MMIO write). */
1291 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1292 {
1293 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1294 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1295 { /* half likely, or at least it's a line shorter. */ }
1296 else if (rc == VINF_SUCCESS)
1297 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1298 else
1299 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1300 }
1301
1302 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1303 {
1304 if ( rc > VINF_EM_NO_MEMORY
1305 && rc <= VINF_EM_LAST)
1306 rc = VINF_EM_NO_MEMORY;
1307 }
1308
1309 return rc;
1310}
1311
1312
1313#if !defined(VBOX_VMM_TARGET_ARMV8)
1314/**
1315 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1316 *
1317 * @returns VBox status code.
1318 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1319 * @param pVCpu The cross context virtual CPU structure.
1320 */
1321static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1322{
1323#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1324 /* Handle the "external interrupt" VM-exit intercept. */
1325 if ( CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
1326 && !CPUMIsGuestVmxExitCtlsSet(&pVCpu->cpum.GstCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
1327 {
1328 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1329 AssertMsg( rcStrict != VINF_VMX_VMEXIT /* VM-exit should have been converted to VINF_SUCCESS. */
1330 && rcStrict != VINF_NO_CHANGE
1331 && rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1332 return VBOXSTRICTRC_VAL(rcStrict);
1333 }
1334#else
1335 RT_NOREF(pVCpu);
1336#endif
1337 return VINF_NO_CHANGE;
1338}
1339
1340
1341/**
1342 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1343 *
1344 * @returns VBox status code.
1345 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1346 * @param pVCpu The cross context virtual CPU structure.
1347 */
1348static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1349{
1350#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1351 /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
1352 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1353 {
1354 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1355 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1356 if (RT_SUCCESS(rcStrict))
1357 {
1358 AssertMsg( rcStrict != VINF_SVM_VMEXIT
1359 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1360 return VBOXSTRICTRC_VAL(rcStrict);
1361 }
1362
1363 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1364 return VINF_EM_TRIPLE_FAULT;
1365 }
1366#else
1367 NOREF(pVCpu);
1368#endif
1369 return VINF_NO_CHANGE;
1370}
1371
1372
1373/**
1374 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1375 *
1376 * @returns VBox status code.
1377 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1378 * @param pVCpu The cross context virtual CPU structure.
1379 */
1380static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1381{
1382#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1383 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1384 {
1385 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1386 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1387 if (RT_SUCCESS(rcStrict))
1388 {
1389 Assert(rcStrict != VINF_SVM_VMEXIT);
1390 return VBOXSTRICTRC_VAL(rcStrict);
1391 }
1392 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1393 return VINF_EM_TRIPLE_FAULT;
1394 }
1395#else
1396 NOREF(pVCpu);
1397#endif
1398 return VINF_NO_CHANGE;
1399}
1400#endif
1401
1402
1403/**
1404 * Executes all pending forced actions.
1405 *
1406 * Forced actions can cause execution delays and execution
1407 * rescheduling. The first we deal with using action priority, so
1408 * that for instance pending timers aren't scheduled and run until
1409 * right before execution. The rescheduling we deal with using
1410 * return codes. The same goes for VM termination, only in that case
1411 * we exit everything.
1412 *
1413 * @returns VBox status code of equal or greater importance/severity than rc.
1414 * The most important ones are: VINF_EM_RESCHEDULE,
1415 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1416 *
1417 * @param pVM The cross context VM structure.
1418 * @param pVCpu The cross context virtual CPU structure.
1419 * @param rc The current rc.
1420 *
1421 */
1422int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1423{
1424 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1425#ifdef VBOX_STRICT
1426 int rcIrq = VINF_SUCCESS;
1427#endif
1428 int rc2;
1429#define UPDATE_RC() \
1430 do { \
1431 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1432 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1433 break; \
1434 if (!rc || rc2 < rc) \
1435 rc = rc2; \
1436 } while (0)
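/* Note on the merge rule above: scheduling statuses use lower VINF_EM_* values for
 * more important requests (e.g. VINF_EM_OFF ranks above VINF_EM_RESCHEDULE), so the
 * "rc2 < rc" test keeps the most important pending request, while negative (error)
 * statuses already in rc are never overwritten. */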
1437 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1438
1439 /*
1440 * Post execution chunk first.
1441 */
1442 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1443 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1444 {
1445 /*
1446 * EMT Rendezvous (must be serviced before termination).
1447 */
1448 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1449 {
1450 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1451 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1452 UPDATE_RC();
1453 /** @todo HACK ALERT! The following test is to make sure EM+TM
1454 * thinks the VM is stopped/reset before the next VM state change
1455 * is made. We need a better solution for this, or at least make it
1456 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1457 * VINF_EM_SUSPEND). */
1458 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1459 {
1460 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1461 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1462 return rc;
1463 }
1464 }
1465
1466 /*
1467 * State change request (cleared by vmR3SetStateLocked).
1468 */
1469 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1470 {
1471 VMSTATE enmState = VMR3GetState(pVM);
1472 switch (enmState)
1473 {
1474 case VMSTATE_FATAL_ERROR:
1475 case VMSTATE_FATAL_ERROR_LS:
1476 case VMSTATE_GURU_MEDITATION:
1477 case VMSTATE_GURU_MEDITATION_LS:
1478 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1479 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1480 return VINF_EM_SUSPEND;
1481
1482 case VMSTATE_DESTROYING:
1483 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1484 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1485 return VINF_EM_TERMINATE;
1486
1487 default:
1488 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1489 }
1490 }
1491
1492 /*
1493 * Debugger Facility polling.
1494 */
1495 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1496 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1497 {
1498 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1499 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1500 /** @todo why that VINF_EM_DBG_EVENT here? Duplicate info, should be handled
1501 * somewhere before we get here, I would think. */
1502 if (rc == VINF_EM_DBG_EVENT) /* HACK! We should've handled pending debug event. */
1503 rc = rc2;
1504 else
1505 UPDATE_RC();
1506 }
1507
1508 /*
1509 * Postponed reset request.
1510 */
1511 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1512 {
1513 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1514 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1515 UPDATE_RC();
1516 }
1517
1518 /*
1519 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1520 */
1521 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1522 {
1523 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1524 UPDATE_RC();
1525 if (rc == VINF_EM_NO_MEMORY)
1526 return rc;
1527 }
1528
1529 /* check that we got them all */
1530 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1531 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1532 }
1533
1534 /*
1535 * Normal priority then.
1536 * (Executed in no particular order.)
1537 */
1538 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1539 {
1540 /*
1541 * PDM Queues are pending.
1542 */
1543 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1544 PDMR3QueueFlushAll(pVM);
1545
1546 /*
1547 * PDM DMA transfers are pending.
1548 */
1549 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1550 PDMR3DmaRun(pVM);
1551
1552 /*
1553 * EMT Rendezvous (make sure they are handled before the requests).
1554 */
1555 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1556 {
1557 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1558 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1559 UPDATE_RC();
1560 /** @todo HACK ALERT! The following test is to make sure EM+TM
1561 * thinks the VM is stopped/reset before the next VM state change
1562 * is made. We need a better solution for this, or at least make it
1563 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1564 * VINF_EM_SUSPEND). */
1565 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1566 {
1567 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1568 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1569 return rc;
1570 }
1571 }
1572
1573 /*
1574 * Requests from other threads.
1575 */
1576 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1577 {
1578 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1579 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1580 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1581 {
1582 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1583 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1584 return rc2;
1585 }
1586 UPDATE_RC();
1587 /** @todo HACK ALERT! The following test is to make sure EM+TM
1588 * thinks the VM is stopped/reset before the next VM state change
1589 * is made. We need a better solution for this, or at least make it
1590 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1591 * VINF_EM_SUSPEND). */
1592 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1593 {
1594 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1595 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1596 return rc;
1597 }
1598 }
1599
1600 /* check that we got them all */
1601 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS));
1602 }
1603
1604 /*
1605 * Normal priority then. (per-VCPU)
1606 * (Executed in no particular order.)
1607 */
1608 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1609 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1610 {
1611 /*
1612 * Requests from other threads.
1613 */
1614 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1615 {
1616 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1617 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1618 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1619 {
1620 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1621 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1622 return rc2;
1623 }
1624 UPDATE_RC();
1625 /** @todo HACK ALERT! The following test is to make sure EM+TM
1626 * thinks the VM is stopped/reset before the next VM state change
1627 * is made. We need a better solution for this, or at least make it
1628 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1629 * VINF_EM_SUSPEND). */
1630 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1631 {
1632 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1633 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1634 return rc;
1635 }
1636 }
1637
1638 /* check that we got them all */
1639 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1640 }
1641
1642 /*
1643 * High priority pre execution chunk last.
1644 * (Executed in ascending priority order.)
1645 */
1646 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1647 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1648 {
1649 /*
1650 * Timers before interrupts.
1651 */
1652 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1653 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1654 TMR3TimerQueuesDo(pVM);
1655
1656#if !defined(VBOX_VMM_TARGET_ARMV8)
1657 /*
1658 * Pick up asynchronously posted interrupts into the APIC.
1659 */
1660 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1661 APICUpdatePendingInterrupts(pVCpu);
1662
1663 /*
1664 * The instruction following an emulated STI should *always* be executed!
1665 *
1666 * Note! We intentionally don't clear CPUMCTX_INHIBIT_INT here if
1667 * the eip is the same as the inhibited instr address. Before we
1668 * are able to execute this instruction in raw mode (iret to
1669 * guest code) an external interrupt might force a world switch
1670 * again. Possibly allowing a guest interrupt to be dispatched
1671 * in the process. This could break the guest. Sounds very
1672             * unlikely, but such timing-sensitive problems are not as rare as
1673 * you might think.
1674 *
1675 * Note! This used to be a force action flag. Can probably ditch this code.
1676 */
1677 /** @todo r=bird: the clearing case will *never* be taken here as
1678             * CPUMIsInInterruptShadow already makes sure the RIPs match. */
1679 if ( CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1680 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1681 {
1682 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_INHIBIT_INT);
1683 if (CPUMGetGuestRIP(pVCpu) != pVCpu->cpum.GstCtx.uRipInhibitInt)
1684 {
1685 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
1686 Log(("Clearing CPUMCTX_INHIBIT_INT at %RGv - successor %RGv\n",
1687 (RTGCPTR)CPUMGetGuestRIP(pVCpu), (RTGCPTR)pVCpu->cpum.GstCtx.uRipInhibitInt));
1688 }
1689 else
1690 Log(("Leaving CPUMCTX_INHIBIT_INT set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1691 }
1692
1693 /** @todo SMIs. If we implement SMIs, this is where they will have to be
1694 * delivered. */
1695
1696# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1697 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
1698 | VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW))
1699 {
1700 /*
1701 * VMX Nested-guest APIC-write pending (can cause VM-exits).
1702 * Takes priority over even SMI and INIT signals.
1703 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
1704 */
1705 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
1706 {
1707 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
1708 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1709 UPDATE_RC();
1710 }
1711
1712 /*
1713                 * APIC write emulation MAY have caused a VM-exit.
1714 * If it did cause a VM-exit, there's no point checking the other VMX non-root mode FFs here.
1715 */
1716 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
1717 {
1718 /*
1719 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
1720 * Takes priority over "Traps on the previous instruction".
1721 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
1722 */
1723 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
1724 {
1725 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
1726 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1727 UPDATE_RC();
1728 }
1729 /*
1730 * VMX Nested-guest preemption timer VM-exit.
1731 * Takes priority over NMI-window VM-exits.
1732 */
1733 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
1734 {
1735 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
1736 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1737 UPDATE_RC();
1738 }
1739 /*
1740 * VMX interrupt-window and NMI-window VM-exits.
1741 * Takes priority over non-maskable interrupts (NMIs) and external interrupts respectively.
1742                      * If we are in an interrupt shadow or if we are already in the process of delivering
1743 * an event then these VM-exits cannot occur.
1744 *
1745 * Interrupt shadows block NMI-window VM-exits.
1746 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
1747 *
1748 * See Intel spec. 25.2 "Other Causes Of VM Exits".
1749 * See Intel spec. 26.7.6 "NMI-Window Exiting".
1750 * See Intel spec. 6.7 "Nonmaskable Interrupt (NMI)".
1751 */
1752 else if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1753 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx)
1754 && !TRPMHasTrap(pVCpu))
1755 {
1756 /*
1757 * VMX NMI-window VM-exit.
1758 */
1759 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1760 && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1761 {
1762 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
1763 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1764 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
1765 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1766 && rc2 != VINF_VMX_VMEXIT
1767 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1768 UPDATE_RC();
1769 }
1770 /*
1771 * VMX interrupt-window VM-exit.
1772 * This is a bit messy with the way the code below is currently structured,
1773 * but checking VMCPU_FF_INTERRUPT_NMI here (combined with CPUMAreInterruptsInhibitedByNmi
1774 * already checked at this point) should allow a pending NMI to be delivered prior to
1775 * causing an interrupt-window VM-exit.
1776 */
1777 /** @todo Restructure this later to happen after injecting NMI/causing NMI-exit, see
1778 * code in VMX R0 event delivery. */
1779 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
1780 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1781 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1782 {
1783 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
1784 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1785 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
1786 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1787 && rc2 != VINF_VMX_VMEXIT
1788 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1789 UPDATE_RC();
1790 }
1791 }
1792 }
1793
1794 /*
1795 * Interrupt-window and NMI-window force flags might still be pending if we didn't actually cause
1796 * a VM-exit above. They will get cleared eventually when ANY nested-guest VM-exit occurs.
1797 * However, the force flags asserted below MUST have been cleared at this point.
1798 */
1799 Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER));
1800 }
1801# endif
1802
1803 /*
1804 * Guest event injection.
1805 */
1806 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
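            /* Note: fWakeupPending is set below when an NMI or external interrupt
               actually gets scheduled for the guest; it is checked again further
               down so the EMT rendezvous and VM state change handling do not
               swallow the wakeup needed to leave EMSTATE_HALTED. */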
1807 bool fWakeupPending = false;
1808 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW
1809 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_NESTED_GUEST
1810 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1811 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1812 && (!rc || rc >= VINF_EM_RESCHEDULE_EXEC_ENGINE)
1813 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx) /* Interrupt shadows block both NMIs and interrupts. */
1814 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
1815 {
1816 if (CPUMGetGuestGif(&pVCpu->cpum.GstCtx))
1817 {
1818 bool fInVmxNonRootMode;
1819 bool fInSvmHwvirtMode;
1820 if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx))
1821 {
1822 fInVmxNonRootMode = false;
1823 fInSvmHwvirtMode = false;
1824 }
1825 else
1826 {
1827 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1828 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1829 }
1830
1831 /*
1832 * NMIs (take priority over external interrupts).
1833 */
1834 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1835 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
1836 {
1837# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1838 if ( fInVmxNonRootMode
1839 && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
1840 {
1841 /* We MUST clear the NMI force-flag here, see @bugref{10318#c19}. */
1842 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1843 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
1844 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1845 UPDATE_RC();
1846 }
1847 else
1848# endif
1849# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1850 if ( fInSvmHwvirtMode
1851 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
1852 {
1853 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
1854 AssertMsg( rc2 != VINF_SVM_VMEXIT
1855 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1856 UPDATE_RC();
1857 }
1858 else
1859# endif
1860 {
1861 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_NMI);
1862 if (rc2 == VINF_SUCCESS)
1863 {
1864 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1865 fWakeupPending = true;
1866# if 0 /* HMR3IsActive is not reliable (esp. after restore), just return VINF_EM_RESCHEDULE. */
1867 if (pVM->em.s.fIemExecutesAll)
1868 rc2 = VINF_EM_RESCHEDULE;
1869 else
1870 {
1871 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
1872 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
1873 : VINF_EM_RESCHEDULE_REM;
1874 }
1875# else
1876 rc2 = VINF_EM_RESCHEDULE;
1877# endif
1878 }
1879 UPDATE_RC();
1880 }
1881 }
1882# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1883                     /** @todo NSTSVM: Handle this for SVM here too later, not when an interrupt is
1884 * actually pending like we currently do. */
1885# endif
1886 /*
1887 * External interrupts.
1888 */
1889 else
1890 {
1891 /*
1892                      * VMX: virtual interrupts take priority over physical interrupts.
1893                      * SVM: physical interrupts take priority over virtual interrupts.
1894 */
1895 if ( fInVmxNonRootMode
1896 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1897 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1898 {
1899 /** @todo NSTVMX: virtual-interrupt delivery. */
1900 rc2 = VINF_SUCCESS;
1901 }
1902 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1903 && CPUMIsGuestPhysIntrEnabled(pVCpu))
1904 {
1905 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1906 if (fInVmxNonRootMode)
1907 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
1908 else if (fInSvmHwvirtMode)
1909 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
1910 else
1911 rc2 = VINF_NO_CHANGE;
1912
1913 if (rc2 == VINF_NO_CHANGE)
1914 {
1915 bool fInjected = false;
1916 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1917 /** @todo this really isn't nice, should properly handle this */
1918 /* Note! This can still cause a VM-exit (on Intel). */
1919 LogFlow(("Calling TRPMR3InjectEvent: %04x:%08RX64 efl=%#x\n",
1920 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
1921 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
1922 fWakeupPending = true;
1923 if ( pVM->em.s.fIemExecutesAll
1924 && ( rc2 == VINF_EM_RESCHEDULE_REM
1925 || rc2 == VINF_EM_RESCHEDULE_EXEC_ENGINE))
1926 rc2 = VINF_EM_RESCHEDULE;
1927# ifdef VBOX_STRICT
1928 if (fInjected)
1929 rcIrq = rc2;
1930# endif
1931 }
1932 UPDATE_RC();
1933 }
1934 else if ( fInSvmHwvirtMode
1935 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1936 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
1937 {
1938 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
1939 if (rc2 == VINF_NO_CHANGE)
1940 {
1941 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1942 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
1943 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
1944 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1945 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1946 rc2 = VINF_EM_RESCHEDULE;
1947# ifdef VBOX_STRICT
1948 rcIrq = rc2;
1949# endif
1950 }
1951 UPDATE_RC();
1952 }
1953 }
1954 } /* CPUMGetGuestGif */
1955 }
1956
1957#else /* VBOX_VMM_TARGET_ARMV8 */
1958 bool fWakeupPending = false;
1959
1960 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VTIMER_ACTIVATED))
1961 {
1962 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VTIMER_ACTIVATED);
1963
1964 fWakeupPending = true;
1965 rc2 = VINF_EM_RESCHEDULE;
1966 UPDATE_RC();
1967 }
1968#endif /* VBOX_VMM_TARGET_ARMV8 */
1969
1970 /*
1971 * Allocate handy pages.
1972 */
1973 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1974 {
1975 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1976 UPDATE_RC();
1977 }
1978
1979 /*
1980 * Debugger Facility request.
1981 */
1982 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1983 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1984 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
1985 {
1986 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1987 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1988 UPDATE_RC();
1989 }
1990
1991 /*
1992 * EMT Rendezvous (must be serviced before termination).
1993 */
1994 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1995 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1996 {
1997 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1998 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1999 UPDATE_RC();
2000 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2001 * stopped/reset before the next VM state change is made. We need a better
2002 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2003              * && rc <= VINF_EM_SUSPEND). */
2004 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2005 {
2006 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2007 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2008 return rc;
2009 }
2010 }
2011
2012 /*
2013 * State change request (cleared by vmR3SetStateLocked).
2014 */
2015 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2016 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2017 {
2018 VMSTATE enmState = VMR3GetState(pVM);
2019 switch (enmState)
2020 {
2021 case VMSTATE_FATAL_ERROR:
2022 case VMSTATE_FATAL_ERROR_LS:
2023 case VMSTATE_GURU_MEDITATION:
2024 case VMSTATE_GURU_MEDITATION_LS:
2025 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2026 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2027 return VINF_EM_SUSPEND;
2028
2029 case VMSTATE_DESTROYING:
2030 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2031 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2032 return VINF_EM_TERMINATE;
2033
2034 default:
2035 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2036 }
2037 }
2038
2039 /*
2040 * Out of memory? Since most of our fellow high priority actions may cause us
2041 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2042 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2043 * than us since we can terminate without allocating more memory.
2044 */
2045 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2046 {
2047 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2048 UPDATE_RC();
2049 if (rc == VINF_EM_NO_MEMORY)
2050 return rc;
2051 }
2052
2053 /*
2054 * If the virtual sync clock is still stopped, make TM restart it.
2055 */
2056 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2057 TMR3VirtualSyncFF(pVM, pVCpu);
2058
2059#ifdef DEBUG
2060 /*
2061 * Debug, pause the VM.
2062 */
2063 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2064 {
2065 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2066 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2067 return VINF_EM_SUSPEND;
2068 }
2069#endif
2070
2071 /* check that we got them all */
2072 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2073#if defined(VBOX_VMM_TARGET_ARMV8)
2074 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_DBGF));
2075#else
2076 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2077#endif
2078 }
2079
2080#undef UPDATE_RC
2081 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2082 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2083 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2084 return rc;
2085}
2086
2087
2088/**
2089 * Check if the preset execution time cap restricts guest execution scheduling.
2090 *
2091 * @returns true if allowed, false otherwise
2092 * @param pVM The cross context VM structure.
2093 * @param pVCpu The cross context virtual CPU structure.
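 *
 * @remarks Illustrative numbers (assuming EM_TIME_SLICE is a 100 ms window):
 *          with uCpuExecutionCap set to 50, the VCPU may accumulate at most
 *          50 ms of combined kernel+user execution time per slice; once that
 *          budget is exhausted this returns false until the next slice starts.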
2094 */
2095bool emR3IsExecutionAllowedSlow(PVM pVM, PVMCPU pVCpu)
2096{
2097 Assert(pVM->uCpuExecutionCap != 100);
2098 uint64_t cMsUserTime;
2099 uint64_t cMsKernelTime;
2100 if (RT_SUCCESS(RTThreadGetExecutionTimeMilli(&cMsKernelTime, &cMsUserTime)))
2101 {
2102 uint64_t const msTimeNow = RTTimeMilliTS();
2103 if (pVCpu->em.s.msTimeSliceStart + EM_TIME_SLICE < msTimeNow)
2104 {
2105 /* New time slice. */
2106 pVCpu->em.s.msTimeSliceStart = msTimeNow;
2107 pVCpu->em.s.cMsTimeSliceStartExec = cMsKernelTime + cMsUserTime;
2108 pVCpu->em.s.cMsTimeSliceExec = 0;
2109 }
2110 pVCpu->em.s.cMsTimeSliceExec = cMsKernelTime + cMsUserTime - pVCpu->em.s.cMsTimeSliceStartExec;
2111
2112 bool const fRet = pVCpu->em.s.cMsTimeSliceExec < (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100;
2113 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.msTimeSliceStart,
2114 pVCpu->em.s.cMsTimeSliceStartExec, pVCpu->em.s.cMsTimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2115 return fRet;
2116 }
2117 return true;
2118}
2119
2120
2121/**
2122 * Execute VM.
2123 *
2124 * This function is the main loop of the VM. The emulation thread
2125 * calls this function when the VM has been successfully constructed
2126 * and we're ready for executing the VM.
2127 *
2128 * Returning from this function means that the VM is turned off or
2129 * suspended (state already saved) and deconstruction is next in line.
2130 *
2131 * All interaction from other threads is done using forced actions
2132 * and signalling of the wait object.
2133 *
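 * @remarks Rough shape of the outer loop below: pending forced actions are
 *          serviced first (emR3ForcedActions), the resulting status code is
 *          mapped to the next EM state, and the state switch then dispatches
 *          to the HM, NEM, recompiler or IEM inner loops until a suspend,
 *          power-off or termination status bubbles back out.
 *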
2134 * @returns VBox status code, informational status codes may indicate failure.
2135 * @param pVM The cross context VM structure.
2136 * @param pVCpu The cross context virtual CPU structure.
2137 */
2138VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2139{
2140 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2141 pVM,
2142 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2143 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2144 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2145 VM_ASSERT_EMT(pVM);
2146 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2147 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2148 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2149 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2150
2151 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2152 if (rc == 0)
2153 {
2154 /*
2155 * Start the virtual time.
2156 */
2157 TMR3NotifyResume(pVM, pVCpu);
2158
2159 /*
2160 * The Outer Main Loop.
2161 */
2162 bool fFFDone = false;
2163
2164 /* Reschedule right away to start in the right state. */
2165 rc = VINF_SUCCESS;
2166
2167 /* If resuming after a pause or a state load, restore the previous
2168 state or else we'll start executing code. Else, just reschedule. */
2169 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2170 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2171 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2172 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2173 else
2174 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2175 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2176
2177 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2178 for (;;)
2179 {
2180 /*
2181 * Before we can schedule anything (we're here because
2182 * scheduling is required) we must service any pending
2183 * forced actions to avoid any pending action causing
2184             * immediate rescheduling upon entering an inner loop.
2185 *
2186 * Do forced actions.
2187 */
2188 if ( !fFFDone
2189 && RT_SUCCESS(rc)
2190 && rc != VINF_EM_TERMINATE
2191 && rc != VINF_EM_OFF
2192 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2193 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2194 {
2195 rc = emR3ForcedActions(pVM, pVCpu, rc);
2196 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2197 }
2198 else if (fFFDone)
2199 fFFDone = false;
2200
2201#if defined(VBOX_STRICT) && !defined(VBOX_VMM_TARGET_ARMV8)
2202 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
2203#endif
2204
2205 /*
2206 * Now what to do?
2207 */
2208 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2209 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2210 switch (rc)
2211 {
2212 /*
2213 * Keep doing what we're currently doing.
2214 */
2215 case VINF_SUCCESS:
2216 break;
2217
2218 /*
2219 * Reschedule - to main execution engine (HM, NEM, IEM/REM).
2220 */
2221 case VINF_EM_RESCHEDULE_EXEC_ENGINE:
2222 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2223 if (!pVM->em.s.fIemExecutesAll)
2224 {
2225#if !defined(VBOX_VMM_TARGET_ARMV8)
2226 if (VM_IS_HM_ENABLED(pVM))
2227 {
2228 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
2229 {
2230 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2231 pVCpu->em.s.enmState = EMSTATE_HM;
2232 break;
2233 }
2234 }
2235 else
2236#endif
2237 if (VM_IS_NEM_ENABLED(pVM) && NEMR3CanExecuteGuest(pVM, pVCpu))
2238 {
2239 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2240 pVCpu->em.s.enmState = EMSTATE_NEM;
2241 break;
2242 }
2243 }
2244
2245 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_RECOMPILER)\n", enmOldState, EMSTATE_RECOMPILER));
2246 pVCpu->em.s.enmState = EMSTATE_RECOMPILER;
2247 break;
2248
2249 /*
2250 * Reschedule - to recompiled execution.
2251 */
2252 case VINF_EM_RESCHEDULE_REM:
2253 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2254                 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_RECOMPILER)\n",
2255 enmOldState, EMSTATE_RECOMPILER));
2256 pVCpu->em.s.enmState = EMSTATE_RECOMPILER;
2257 break;
2258
2259 /*
2260 * Resume.
2261 */
2262 case VINF_EM_RESUME:
2263 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2264 /* Don't reschedule in the halted or wait-for-SIPI cases. */
2265 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2266 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2267 {
2268 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2269 break;
2270 }
2271 /* fall through and get scheduled. */
2272 RT_FALL_THRU();
2273
2274 /*
2275 * Reschedule.
2276 */
2277 case VINF_EM_RESCHEDULE:
2278 {
2279 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2280 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2281 pVCpu->em.s.enmState = enmState;
2282 break;
2283 }
2284
2285 /*
2286 * Halted.
2287 */
2288 case VINF_EM_HALT:
2289 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2290 pVCpu->em.s.enmState = EMSTATE_HALTED;
2291 break;
2292
2293 /*
2294 * Switch to the wait for SIPI state (application processor only)
2295 */
2296 case VINF_EM_WAIT_SIPI:
2297 Assert(pVCpu->idCpu != 0);
2298 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2299 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2300 break;
2301
2302
2303 /*
2304 * Suspend.
2305 */
2306 case VINF_EM_SUSPEND:
2307 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2308 Assert(enmOldState != EMSTATE_SUSPENDED);
2309 pVCpu->em.s.enmPrevState = enmOldState;
2310 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2311 break;
2312
2313 /*
2314 * Reset.
2315                  * We might end up doing a double reset for now; we'll have to clean up the mess later.
2316 */
2317 case VINF_EM_RESET:
2318 {
2319 if (pVCpu->idCpu == 0)
2320 {
2321 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2322 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2323 pVCpu->em.s.enmState = enmState;
2324 }
2325 else
2326 {
2327 /* All other VCPUs go into the wait for SIPI state. */
2328 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2329 }
2330 break;
2331 }
2332
2333 /*
2334 * Power Off.
2335 */
2336 case VINF_EM_OFF:
2337 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2338 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2339 TMR3NotifySuspend(pVM, pVCpu);
2340 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2341 return rc;
2342
2343 /*
2344 * Terminate the VM.
2345 */
2346 case VINF_EM_TERMINATE:
2347 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2348 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2349 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2350 TMR3NotifySuspend(pVM, pVCpu);
2351 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2352 return rc;
2353
2354
2355 /*
2356 * Out of memory, suspend the VM and stuff.
2357 */
2358 case VINF_EM_NO_MEMORY:
2359 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2360 Assert(enmOldState != EMSTATE_SUSPENDED);
2361 pVCpu->em.s.enmPrevState = enmOldState;
2362 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2363 TMR3NotifySuspend(pVM, pVCpu);
2364 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2365
2366 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2367 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2368 if (rc != VINF_EM_SUSPEND)
2369 {
2370 if (RT_SUCCESS_NP(rc))
2371 {
2372 AssertLogRelMsgFailed(("%Rrc\n", rc));
2373 rc = VERR_EM_INTERNAL_ERROR;
2374 }
2375 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2376 }
2377 return rc;
2378
2379 /*
2380 * Guest debug events.
2381 */
2382 case VINF_EM_DBG_STEPPED:
2383 case VINF_EM_DBG_STOP:
2384 case VINF_EM_DBG_EVENT:
2385 case VINF_EM_DBG_BREAKPOINT:
2386 case VINF_EM_DBG_STEP:
2387 if (enmOldState == EMSTATE_HM)
2388 {
2389 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2390 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2391 }
2392 else if (enmOldState == EMSTATE_NEM)
2393 {
2394 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2395 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2396 }
2397 else if (enmOldState == EMSTATE_RECOMPILER)
2398 {
2399 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RECOMPILER));
2400 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RECOMPILER;
2401 }
2402 else
2403 {
2404#ifdef VBOX_VMM_TARGET_ARMV8
2405 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2406 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM; /** @todo No IEM yet and this gets selected if enmOldState == EMSTATE_HALTED. */
2407#else
2408 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2409 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2410#endif
2411 }
2412 break;
2413
2414 /*
2415 * Hypervisor debug events.
2416 */
2417 case VINF_EM_DBG_HYPER_STEPPED:
2418 case VINF_EM_DBG_HYPER_BREAKPOINT:
2419 case VINF_EM_DBG_HYPER_ASSERTION:
2420 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2421 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2422 break;
2423
2424 /*
2425 * Triple fault.
2426 */
2427 case VINF_EM_TRIPLE_FAULT:
2428 if (!pVM->em.s.fGuruOnTripleFault)
2429 {
2430 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2431 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2432 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2433 continue;
2434 }
2435 /* Else fall through and trigger a guru. */
2436 RT_FALL_THRU();
2437
2438 case VERR_VMM_RING0_ASSERTION:
2439 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2440 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2441 break;
2442
2443 /*
2444 * Any error code showing up here other than the ones we
2445                  * know and process above is considered to be FATAL.
2446 *
2447 * Unknown warnings and informational status codes are also
2448 * included in this.
2449 */
2450 default:
2451 if (RT_SUCCESS_NP(rc))
2452 {
2453 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2454 rc = VERR_EM_INTERNAL_ERROR;
2455 }
2456 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2457 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2458 break;
2459 }
2460
2461 /*
2462 * Act on state transition.
2463 */
2464 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2465 if (enmOldState != enmNewState)
2466 {
2467 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2468
2469 /* Clear MWait flags and the unhalt FF. */
2470 if ( enmOldState == EMSTATE_HALTED
2471 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2472 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2473 && ( enmNewState == EMSTATE_HM
2474 || enmNewState == EMSTATE_NEM
2475 || enmNewState == EMSTATE_RECOMPILER
2476 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2477 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2478 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2479 || enmNewState == EMSTATE_DEBUG_GUEST_RECOMPILER) )
2480 {
2481 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2482 {
2483 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2484 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2485 }
2486 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2487 {
2488 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2489 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2490 }
2491 }
2492 }
2493 else
2494 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2495
2496 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2497 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2498
2499 /*
2500 * Act on the new state.
2501 */
2502 switch (enmNewState)
2503 {
2504 /*
2505 * Execute hardware accelerated raw.
2506 */
2507 case EMSTATE_HM:
2508#if defined(VBOX_VMM_TARGET_ARMV8)
2509 AssertReleaseFailed(); /* Should never get here. */
2510#else
2511 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2512#endif
2513 break;
2514
2515 /*
2516                  * Execute using the native execution manager (NEM).
2517 */
2518 case EMSTATE_NEM:
2519 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2520 break;
2521
2522 /*
2523 * Execute recompiled.
2524 */
2525 case EMSTATE_RECOMPILER:
2526 rc = VBOXSTRICTRC_TODO(emR3RecompilerExecute(pVM, pVCpu, enmOldState == EMSTATE_HALTED, &fFFDone));
2527 Log2(("EMR3ExecuteVM: emR3RecompilerExecute -> %Rrc\n", rc));
2528 break;
2529
2530 /*
2531 * Execute in the interpreter.
2532 */
2533 case EMSTATE_IEM:
2534 {
2535#if 0 /* For comparing HM and IEM (@bugref{10464}). */
2536 PCPUMCTX const pCtx = &pVCpu->cpum.GstCtx;
2537 PCX86FXSTATE const pX87 = &pCtx->XState.x87;
2538 Log11(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
2539 "eip=%08x esp=%08x ebp=%08x eflags=%08x\n"
2540 "cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x\n"
2541 "fsw=%04x fcw=%04x ftw=%02x top=%u%s%s%s%s%s%s%s%s%s\n"
2542 "st0=%.10Rhxs st1=%.10Rhxs st2=%.10Rhxs st3=%.10Rhxs\n"
2543 "st4=%.10Rhxs st5=%.10Rhxs st6=%.10Rhxs st7=%.10Rhxs\n",
2544                            pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
2545 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.u,
2546 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel, pCtx->fs.Sel, pCtx->gs.Sel,
2547 pX87->FSW, pX87->FCW, pX87->FTW, X86_FSW_TOP_GET(pX87->FSW),
2548 pX87->FSW & X86_FSW_ES ? " ES!" : "",
2549 pX87->FSW & X86_FSW_IE ? " IE" : "",
2550 pX87->FSW & X86_FSW_DE ? " DE" : "",
2551 pX87->FSW & X86_FSW_SF ? " SF" : "",
2552 pX87->FSW & X86_FSW_B ? " B!" : "",
2553 pX87->FSW & X86_FSW_C0 ? " C0" : "",
2554 pX87->FSW & X86_FSW_C1 ? " C1" : "",
2555 pX87->FSW & X86_FSW_C2 ? " C2" : "",
2556 pX87->FSW & X86_FSW_C3 ? " C3" : "",
2557 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(0)],
2558 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(1)],
2559 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(2)],
2560 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(3)],
2561 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(4)],
2562 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(5)],
2563 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(6)],
2564 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(7)]));
2565 DBGFR3DisasInstrCurrentLogInternal(pVCpu, NULL);
2566#endif
2567
2568 uint32_t cInstructions = 0;
2569#if 0 /* For testing purposes. */
2570 //STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2571 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2572 //STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2573 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_EXEC_ENGINE || rc == VINF_EM_RESCHEDULE_REM)
2574 rc = VINF_SUCCESS;
2575 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2576#endif
2577 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2578 if (pVM->em.s.fIemExecutesAll)
2579 {
2580 Assert(rc != VINF_EM_RESCHEDULE_REM);
2581 Assert(rc != VINF_EM_RESCHEDULE_EXEC_ENGINE);
2582#ifdef VBOX_HIGH_RES_TIMERS_HACK
2583 if (cInstructions < 2048)
2584 TMTimerPollVoid(pVM, pVCpu);
2585#endif
2586 }
2587 else if (rc == VINF_SUCCESS)
2588 rc = VINF_EM_RESCHEDULE; /* Need to check whether we can run in HM or NEM again. */
2589#ifndef VBOX_VMM_TARGET_ARMV8
2590 if (rc != VINF_EM_EMULATE_SPLIT_LOCK)
2591 { /* likely */ }
2592 else
2593 rc = VBOXSTRICTRC_TODO(emR3ExecuteSplitLockInstruction(pVM, pVCpu));
2594#endif
2595 fFFDone = false;
2596 break;
2597 }
2598
2599 /*
2600 * Application processor execution halted until SIPI.
2601 */
2602 case EMSTATE_WAIT_SIPI:
2603 /* no break */
2604 /*
2605 * hlt - execution halted until interrupt.
2606 */
2607 case EMSTATE_HALTED:
2608 {
2609 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2610                     /* If HM (or someone else) stores a pending interrupt in
2611 TRPM, it must be dispatched ASAP without any halting.
2612 Anything pending in TRPM has been accepted and the CPU
2613                        should already be in the right state to receive it. */
2614 if (TRPMHasTrap(pVCpu))
2615 rc = VINF_EM_RESCHEDULE;
2616#if !defined(VBOX_VMM_TARGET_ARMV8)
2617 /* MWAIT has a special extension where it's woken up when
2618 an interrupt is pending even when IF=0. */
2619 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2620 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2621 {
2622 rc = VMR3WaitHalted(pVM, pVCpu, 0 /*fFlags*/);
2623 if (rc == VINF_SUCCESS)
2624 {
2625 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2626 APICUpdatePendingInterrupts(pVCpu);
2627
2628 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2629 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2630 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2631 {
2632 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2633 rc = VINF_EM_RESCHEDULE;
2634 }
2635
2636 }
2637 }
2638#endif
2639 else
2640 {
2641#if defined(VBOX_VMM_TARGET_ARMV8)
2642 const uint32_t fWaitHalted = 0; /* WFI/WFE always return when an interrupt happens. */
2643#else
2644 const uint32_t fWaitHalted = (CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF) ? 0 : VMWAITHALTED_F_IGNORE_IRQS;
2645#endif
2646 rc = VMR3WaitHalted(pVM, pVCpu, fWaitHalted);
2647 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2648 check VMCPU_FF_UPDATE_APIC here. */
2649 if ( rc == VINF_SUCCESS
2650#if defined(VBOX_VMM_TARGET_ARMV8)
2651 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_VTIMER_ACTIVATED
2652 | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_INTERRUPT_IRQ)
2653#else
2654 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT)
2655#endif
2656 )
2657 {
2658 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2659 rc = VINF_EM_RESCHEDULE;
2660 }
2661 }
2662
2663 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2664 break;
2665 }
2666
2667 /*
2668 * Suspended - return to VM.cpp.
2669 */
2670 case EMSTATE_SUSPENDED:
2671 TMR3NotifySuspend(pVM, pVCpu);
2672 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2673 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2674 return VINF_EM_SUSPEND;
2675
2676 /*
2677 * Debugging in the guest.
2678 */
2679 case EMSTATE_DEBUG_GUEST_RAW:
2680 case EMSTATE_DEBUG_GUEST_HM:
2681 case EMSTATE_DEBUG_GUEST_NEM:
2682 case EMSTATE_DEBUG_GUEST_IEM:
2683 case EMSTATE_DEBUG_GUEST_RECOMPILER:
2684 TMR3NotifySuspend(pVM, pVCpu);
2685 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2686 TMR3NotifyResume(pVM, pVCpu);
2687 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2688 break;
2689
2690 /*
2691 * Debugging in the hypervisor.
2692 */
2693 case EMSTATE_DEBUG_HYPER:
2694 {
2695 TMR3NotifySuspend(pVM, pVCpu);
2696 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2697
2698 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2699 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2700 if (rc != VINF_SUCCESS)
2701 {
2702 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2703 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2704 else
2705 {
2706 /* switch to guru meditation mode */
2707 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2708 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2709 VMMR3FatalDump(pVM, pVCpu, rc);
2710 }
2711 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2712 return rc;
2713 }
2714
2715 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2716 TMR3NotifyResume(pVM, pVCpu);
2717 break;
2718 }
2719
2720 /*
2721 * Guru meditation takes place in the debugger.
2722 */
2723 case EMSTATE_GURU_MEDITATION:
2724 {
2725 TMR3NotifySuspend(pVM, pVCpu);
2726 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2727 VMMR3FatalDump(pVM, pVCpu, rc);
2728 emR3Debug(pVM, pVCpu, rc);
2729 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2730 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2731 return rc;
2732 }
2733
2734 /*
2735 * The states we don't expect here.
2736 */
2737 case EMSTATE_NONE:
2738 case EMSTATE_RAW_OBSOLETE:
2739 case EMSTATE_IEM_THEN_REM_OBSOLETE:
2740 case EMSTATE_TERMINATING:
2741 default:
2742 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2743 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2744 TMR3NotifySuspend(pVM, pVCpu);
2745 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2746 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2747 return VERR_EM_INTERNAL_ERROR;
2748 }
2749 } /* The Outer Main Loop */
2750 }
2751 else
2752 {
2753 /*
2754 * Fatal error.
2755 */
2756 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2757 TMR3NotifySuspend(pVM, pVCpu);
2758 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2759 VMMR3FatalDump(pVM, pVCpu, rc);
2760 emR3Debug(pVM, pVCpu, rc);
2761 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2762 /** @todo change the VM state! */
2763 return rc;
2764 }
2765
2766 /* not reached */
2767}
2768