VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@109189

Last change on this file since 109189 was 109189, checked in by vboxsync, 23 hours ago

VMM: Removed CPUMR3DisasmInstrCPU as nobody uses it (replaced by DBGFR3DisasInstrCurrent and friends by now).

1/* $Id: EM.cpp 109189 2025-05-07 11:24:03Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2024 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28/** @page pg_em EM - The Execution Monitor / Manager
29 *
30 * The Execution Monitor/Manager is responsible for running the VM, scheduling
31 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
32 * Interpreted), and keeping the CPU states in sync. The function
33 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
34 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
35 * emR3RmExecute).
36 *
37 * The interpreted execution is only used to avoid switching between
38 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
39 * The interpretation is thus implemented as part of EM.
40 *
41 * @see grp_em
42 */
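/*
 * Editor's note: an illustrative, heavily simplified sketch (not part of EM.cpp) of the
 * dispatch idea described above.  emR3Reschedule(), emR3RecompilerExecute() and
 * emR3ForcedActions() are the real functions defined further down in this file;
 * exampleOuterLoopIteration() is a made-up name, and the real EMR3ExecuteVM() loop
 * handles many more states and status codes than shown here.
 */
#if 0 /* illustration only */
static VBOXSTRICTRC exampleOuterLoopIteration(PVM pVM, PVMCPU pVCpu)
{
    bool         fFFDone  = false;
    VBOXSTRICTRC rcStrict = VINF_SUCCESS;

    /* Decide which execution engine fits the current guest state. */
    EMSTATE const enmState = emR3Reschedule(pVM, pVCpu);

    /* Run the matching inner loop until it bails out with a status code or forced action. */
    if (enmState == EMSTATE_RECOMPILER)
        rcStrict = emR3RecompilerExecute(pVM, pVCpu, false /*fWasHalted*/, &fFFDone);
    /* (EMSTATE_HM and EMSTATE_NEM have their own inner loops outside this file.) */

    /* Service any pending forced actions before going around again. */
    if (!fFFDone)
        rcStrict = emR3ForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
    return rcStrict;
}
#endif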
43
44
45/*********************************************************************************************************************************
46* Header Files *
47*********************************************************************************************************************************/
48#define LOG_GROUP LOG_GROUP_EM
49#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET & interrupt injection */
50#include <VBox/vmm/em.h>
51#include <VBox/vmm/vmm.h>
52#include <VBox/vmm/selm.h>
53#include <VBox/vmm/trpm.h>
54#include <VBox/vmm/iem.h>
55#include <VBox/vmm/nem.h>
56#include <VBox/vmm/iom.h>
57#include <VBox/vmm/dbgf.h>
58#include <VBox/vmm/pgm.h>
59#include <VBox/vmm/pdmapic.h>
60#include <VBox/vmm/tm.h>
61#include <VBox/vmm/mm.h>
62#include <VBox/vmm/ssm.h>
63#include <VBox/vmm/pdmapi.h>
64#include <VBox/vmm/pdmcritsect.h>
65#include <VBox/vmm/pdmqueue.h>
66#include <VBox/vmm/hm.h>
67#include "EMInternal.h"
68#include <VBox/vmm/vm.h>
69#include <VBox/vmm/uvm.h>
70#include <VBox/dis.h>
71#include <VBox/err.h>
72#include "VMMTracing.h"
73
74#include <iprt/asm.h>
75#include <iprt/string.h>
76#include <iprt/stream.h>
77#include <iprt/thread.h>
78
79#include "EMInline.h"
80
81
82/*********************************************************************************************************************************
83* Internal Functions *
84*********************************************************************************************************************************/
85static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
86static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
87#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
88static const char *emR3GetStateName(EMSTATE enmState);
89#endif
90static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
91
92
93/**
94 * Initializes the EM.
95 *
96 * @returns VBox status code.
97 * @param pVM The cross context VM structure.
98 */
99VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
100{
101 LogFlow(("EMR3Init\n"));
102 /*
103 * Assert alignment and sizes.
104 */
105 AssertCompileMemberAlignment(VM, em.s, 32);
106 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
107 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
108 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
109
110 /*
111 * Init the structure.
112 */
113 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
114 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
115
116 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll,
117#if (defined(VBOX_VMM_TARGET_X86) && !defined(RT_ARCH_X86) && !defined(RT_ARCH_AMD64)) \
118 || (defined(VBOX_VMM_TARGET_ARMV8) && !defined(RT_ARCH_ARM64)) /** @todo not main exec engine = iem/recomp would be better... */
119 true
120#else
121 false
122#endif
123 );
124 AssertLogRelRCReturn(rc, rc);
125
126 bool fEnabled;
127 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
128 AssertLogRelRCReturn(rc, rc);
129 pVM->em.s.fGuruOnTripleFault = !fEnabled;
130 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
131 {
132 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
133 pVM->em.s.fGuruOnTripleFault = true;
134 }
135
136 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
137
138 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
139 * Whether to try to correlate exit history in any context, detect hot spots and
140 * try to optimize these using IEM if there are other exits close by. This
141 * overrides the context specific settings. */
142 bool fExitOptimizationEnabled = true;
143 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
144 AssertLogRelRCReturn(rc, rc);
145
146 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
147 * Whether to optimize exits in ring-0. Setting this to false will also disable
148 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
149 * capabilities of the host kernel, this optimization may be unavailable. */
150 bool fExitOptimizationEnabledR0 = true;
151 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
152 AssertLogRelRCReturn(rc, rc);
153 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
154
155 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
156 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
157 * hooks are in effect). */
158 /** @todo change the default to true here */
159 bool fExitOptimizationEnabledR0PreemptDisabled = true;
160 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
161 AssertLogRelRCReturn(rc, rc);
162 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
163
164 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
165 * Maximum number of instructions to let EMHistoryExec execute in one go. */
166 uint16_t cHistoryExecMaxInstructions = 8192;
167 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
168 AssertLogRelRCReturn(rc, rc);
169 if (cHistoryExecMaxInstructions < 16)
170 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
171
172 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
173 * Maximum number of instructions between exits during probing. */
174 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
175#ifdef RT_OS_WINDOWS
176 if (VM_IS_NEM_ENABLED(pVM))
177 cHistoryProbeMaxInstructionsWithoutExit = 32;
178#endif
179 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
180 cHistoryProbeMaxInstructionsWithoutExit);
181 AssertLogRelRCReturn(rc, rc);
182 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
183 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
184 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 16");
185
186 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
187 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
188 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
189 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
190 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
191 cHistoryProbeMinInstructions);
192 AssertLogRelRCReturn(rc, rc);
193
194 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
195 {
196 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
197 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
198 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
199 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
200 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
201 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
202 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
203 }
204
205#ifdef VBOX_WITH_IEM_RECOMPILER
206 /** @cfgm{/EM/IemRecompiled, bool, true}
207 * Whether IEM bulk execution is recompiled or interpreted. */
208 rc = CFGMR3QueryBoolDef(pCfgEM, "IemRecompiled", &pVM->em.s.fIemRecompiled, true);
209 AssertLogRelRCReturn(rc, rc);
210#endif
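    /* Editor's note: the keys queried above all live under the /EM/ node of the CFGM tree.
       For experimentation they can usually be set from the host via extra data, e.g.
       "VBoxManage setextradata <vm> VBoxInternal/EM/TripleFaultReset 1" -- the
       "VBoxInternal/" prefix is what maps extra data into CFGM.  Treat this as a debugging
       aid rather than a supported configuration interface. */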
211
212 /*
213 * Saved state.
214 */
215 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
216 NULL, NULL, NULL,
217 NULL, emR3Save, NULL,
218 NULL, emR3Load, NULL);
219 if (RT_FAILURE(rc))
220 return rc;
221
222 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
223 {
224 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
225
226 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
227 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
228 pVCpu->em.s.msTimeSliceStart = 0; /* paranoia */
229 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
230
231# define EM_REG_COUNTER(a, b, c) \
232 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
233 AssertRC(rc);
234
235# define EM_REG_COUNTER_USED(a, b, c) \
236 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
237 AssertRC(rc);
238
239# define EM_REG_PROFILE(a, b, c) \
240 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
241 AssertRC(rc);
242
243# define EM_REG_PROFILE_ADV(a, b, c) \
244 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
245 AssertRC(rc);
246
247 /*
248 * Statistics.
249 */
250#ifdef VBOX_WITH_STATISTICS
251 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
252 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
253
254 /* these should be considered for release statistics. */
255 EM_REG_PROFILE(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
256 EM_REG_PROFILE(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
257 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
258#endif
259 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
260 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times enmR3HMExecute is called.");
261#ifdef VBOX_WITH_STATISTICS
262 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
263 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
264 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
265#endif
266 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
267 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times enmR3NEMExecute is called.");
268#ifdef VBOX_WITH_STATISTICS
269 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
270#endif
271
272 EM_REG_PROFILE(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
273 EM_REG_PROFILE(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
274 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
275 EM_REG_PROFILE(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RecompilerExecute (excluding FFs).");
276
277 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
278
279 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
280 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
281 AssertRC(rc);
282
283 /* History record statistics */
284 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
285 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
286 AssertRC(rc);
287
288 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
289 {
290 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
291 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
292 AssertRC(rc);
293 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
294 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
295 AssertRC(rc);
296 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
297 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", idCpu, iStep);
298 AssertRC(rc);
299 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
300 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
301 AssertRC(rc);
302 }
303
304 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
305 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
306 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
307 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
308 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
309 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
310 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
311 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
312 }
313
314 emR3InitDbg(pVM);
315 return VINF_SUCCESS;
316}
317
318
319/**
320 * Called when a VM initialization stage is completed.
321 *
322 * @returns VBox status code.
323 * @param pVM The cross context VM structure.
324 * @param enmWhat The initialization state that was completed.
325 */
326VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
327{
328 if (enmWhat == VMINITCOMPLETED_RING0)
329 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
330 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
331 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
332 return VINF_SUCCESS;
333}
334
335
336/**
337 * Applies relocations to data and code managed by this
338 * component. This function will be called at init and
339 * whenever the VMM needs to relocate itself inside the GC.
340 *
341 * @param pVM The cross context VM structure.
342 */
343VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
344{
345 LogFlow(("EMR3Relocate\n"));
346 RT_NOREF(pVM);
347}
348
349
350/**
351 * Reset the EM state for a CPU.
352 *
353 * Called by EMR3Reset and hot plugging.
354 *
355 * @param pVCpu The cross context virtual CPU structure.
356 */
357VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
358{
359 /* Reset scheduling state. */
360 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
361
362 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
363 out of the HALTED state here so that enmPrevState doesn't end up as
364 HALTED when EMR3Execute returns. */
365 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
366 {
367 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
368 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
369 }
370}
371
372
373/**
374 * Reset notification.
375 *
376 * @param pVM The cross context VM structure.
377 */
378VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
379{
380 Log(("EMR3Reset: \n"));
381 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
382 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
383}
384
385
386/**
387 * Terminates the EM.
388 *
389 * Termination means cleaning up and freeing all resources;
390 * the VM itself is at this point powered off or suspended.
391 *
392 * @returns VBox status code.
393 * @param pVM The cross context VM structure.
394 */
395VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
396{
397 RT_NOREF(pVM);
398 return VINF_SUCCESS;
399}
400
401
402/**
403 * Execute state save operation.
404 *
405 * @returns VBox status code.
406 * @param pVM The cross context VM structure.
407 * @param pSSM SSM operation handle.
408 */
409static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
410{
411 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
412 {
413 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
414
415 SSMR3PutBool(pSSM, false /*fForceRAW*/);
416
417 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
418 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
419 SSMR3PutU32(pSSM,
420 pVCpu->em.s.enmPrevState == EMSTATE_NONE
421 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED
422 || pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
423 ? pVCpu->em.s.enmPrevState : EMSTATE_NONE);
424
425 /* Save mwait state. */
426 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
427 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
428 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
429 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
430 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
431 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
432 AssertRCReturn(rc, rc);
433 }
434 return VINF_SUCCESS;
435}
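/* Editor's note: the per-VCPU saved-state layout written above is thus one legacy
   fForceRAW bool (always false), one 32-bit previous state (reduced to NONE, HALTED or
   WAIT_SIPI), and the six MONITOR/MWAIT fields.  emR3Load() below consumes the fields in
   the same order, with the latter two groups guarded by the saved-state version. */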
436
437
438/**
439 * Execute state load operation.
440 *
441 * @returns VBox status code.
442 * @param pVM The cross context VM structure.
443 * @param pSSM SSM operation handle.
444 * @param uVersion Data layout version.
445 * @param uPass The data pass.
446 */
447static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
448{
449 /*
450 * Validate version.
451 */
452 if ( uVersion > EM_SAVED_STATE_VERSION
453 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
454 {
455 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
456 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
457 }
458 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
459
460 /*
461 * Load the saved state.
462 */
463 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
464 {
465 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
466
467 bool fForceRAWIgnored;
468 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
469 AssertRCReturn(rc, rc);
470
471 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
472 {
473 /* We are only interested in two enmPrevState values for use when
474 EMR3ExecuteVM is called.
475 Since ~r157540, only these two and EMSTATE_NONE are saved. */
476 SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
477 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
478 if ( pVCpu->em.s.enmPrevState != EMSTATE_WAIT_SIPI
479 && pVCpu->em.s.enmPrevState != EMSTATE_HALTED)
480 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
481
482 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
483 }
484 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
485 {
486 /* Load mwait state. */
487 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
488 AssertRCReturn(rc, rc);
489 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
490 AssertRCReturn(rc, rc);
491 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
492 AssertRCReturn(rc, rc);
493 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
494 AssertRCReturn(rc, rc);
495 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
496 AssertRCReturn(rc, rc);
497 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
498 AssertRCReturn(rc, rc);
499 }
500 }
501 return VINF_SUCCESS;
502}
503
504
505/**
506 * Argument packet for emR3SetExecutionPolicy.
507 */
508struct EMR3SETEXECPOLICYARGS
509{
510 EMEXECPOLICY enmPolicy;
511 bool fEnforce;
512};
513
514
515/**
516 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
517 */
518static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
519{
520 /*
521 * Only the first CPU changes the variables.
522 */
523 if (pVCpu->idCpu == 0)
524 {
525 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
526 switch (pArgs->enmPolicy)
527 {
528 case EMEXECPOLICY_IEM_ALL:
529 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
530
531 /* For making '.alliem 1' useful during debugging, transition the
532 EMSTATE_DEBUG_GUEST_XXX to EMSTATE_DEBUG_GUEST_IEM. */
533 for (VMCPUID i = 0; i < pVM->cCpus; i++)
534 {
535 PVMCPU pVCpuX = pVM->apCpusR3[i];
536 switch (pVCpuX->em.s.enmState)
537 {
538 case EMSTATE_DEBUG_GUEST_RECOMPILER:
539 if (pVM->em.s.fIemRecompiled)
540 break;
541 RT_FALL_THROUGH();
542 case EMSTATE_DEBUG_GUEST_RAW:
543 case EMSTATE_DEBUG_GUEST_HM:
544 case EMSTATE_DEBUG_GUEST_NEM:
545 Log(("EM: idCpu=%u: %s -> EMSTATE_DEBUG_GUEST_IEM\n", i, emR3GetStateName(pVCpuX->em.s.enmState) ));
546 pVCpuX->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
547 break;
548 case EMSTATE_DEBUG_GUEST_IEM:
549 default:
550 break;
551 }
552 }
553 break;
554
555 case EMEXECPOLICY_IEM_RECOMPILED:
556 pVM->em.s.fIemRecompiled = pArgs->fEnforce;
557 break;
558
559 default:
560 AssertFailedReturn(VERR_INVALID_PARAMETER);
561 }
562 Log(("EM: Set execution policy: fIemExecutesAll=%RTbool fIemRecompiled=%RTbool\n",
563 pVM->em.s.fIemExecutesAll, pVM->em.s.fIemRecompiled));
564 }
565
566 /*
567 * Force rescheduling if in HM, NEM, IEM/interpreter or IEM/recompiler.
568 */
569 Assert(pVCpu->em.s.enmState != EMSTATE_RAW_OBSOLETE);
570 return pVCpu->em.s.enmState == EMSTATE_HM
571 || pVCpu->em.s.enmState == EMSTATE_NEM
572 || pVCpu->em.s.enmState == EMSTATE_IEM
573 || pVCpu->em.s.enmState == EMSTATE_RECOMPILER
574 ? VINF_EM_RESCHEDULE
575 : VINF_SUCCESS;
576}
577
578
579/**
580 * Changes an execution scheduling policy parameter.
581 *
582 * This is used to enable or disable raw-mode / hardware-virtualization
583 * execution of user and supervisor code.
584 *
585 * @returns VINF_SUCCESS on success.
586 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
587 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
588 *
589 * @param pUVM The user mode VM handle.
590 * @param enmPolicy The scheduling policy to change.
591 * @param fEnforce Whether to enforce the policy or not.
592 */
593VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
594{
595 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
596 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
597 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
598
599 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
600 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
601}
602
603
604/**
605 * Queries an execution scheduling policy parameter.
606 *
607 * @returns VBox status code
608 * @param pUVM The user mode VM handle.
609 * @param enmPolicy The scheduling policy to query.
610 * @param pfEnforced Where to return the current value.
611 */
612VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
613{
614 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
615 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
616 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
617 PVM pVM = pUVM->pVM;
618 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
619
620 /* No need to bother EMTs with a query. */
621 switch (enmPolicy)
622 {
623 case EMEXECPOLICY_IEM_ALL:
624 *pfEnforced = pVM->em.s.fIemExecutesAll;
625 break;
626 case EMEXECPOLICY_IEM_RECOMPILED:
627 *pfEnforced = pVM->em.s.fIemRecompiled;
628 break;
629 default:
630 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
631 }
632
633 return VINF_SUCCESS;
634}
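#if 0 /* Editor's illustration -- not part of EM.cpp.  A ring-3 caller toggling the
         "IEM executes all" policy and reading it back through the two APIs above;
         exampleForceIemAll() is a made-up helper and pUVM is assumed to be valid. */
static int exampleForceIemAll(PUVM pUVM, bool fEnable)
{
    int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, fEnable);
    if (RT_SUCCESS(rc))
    {
        bool fEnforced = false;
        rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fEnforced);
        Assert(RT_FAILURE(rc) || fEnforced == fEnable);
    }
    return rc;
}
#endif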
635
636
637/**
638 * Queries the main execution engine of the VM.
639 *
640 * @returns VBox status code
641 * @param pUVM The user mode VM handle.
642 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
643 */
644VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
645{
646 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
647 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
648
649 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
650 PVM pVM = pUVM->pVM;
651 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
652
653 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
654 return VINF_SUCCESS;
655}
656
657
658/**
659 * Raise a fatal error.
660 *
661 * Safely terminate the VM with full state report and stuff. This function
662 * will naturally never return.
663 *
664 * @param pVCpu The cross context virtual CPU structure.
665 * @param rc VBox status code.
666 */
667VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
668{
669 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
670 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
671}
672
673
674#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
675/**
676 * Gets the EM state name.
677 *
678 * @returns Pointer to read only state name.
679 * @param enmState The state.
680 */
681static const char *emR3GetStateName(EMSTATE enmState)
682{
683 switch (enmState)
684 {
685 case EMSTATE_NONE: return "EMSTATE_NONE";
686 case EMSTATE_RAW_OBSOLETE: return "EMSTATE_RAW_OBSOLETE";
687 case EMSTATE_HM: return "EMSTATE_HM";
688 case EMSTATE_IEM: return "EMSTATE_IEM";
689 case EMSTATE_RECOMPILER: return "EMSTATE_RECOMPILER";
690 case EMSTATE_HALTED: return "EMSTATE_HALTED";
691 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
692 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
693 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
694 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
695 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
696 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
697 case EMSTATE_DEBUG_GUEST_RECOMPILER: return "EMSTATE_DEBUG_GUEST_RECOMPILER";
698 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
699 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
700 case EMSTATE_IEM_THEN_REM_OBSOLETE: return "EMSTATE_IEM_THEN_REM_OBSOLETE";
701 case EMSTATE_NEM: return "EMSTATE_NEM";
702 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
703 default: return "Unknown!";
704 }
705}
706#endif /* LOG_ENABLED || VBOX_STRICT */
707
708#ifdef VBOX_VMM_TARGET_X86
709
710/**
711 * Handle pending ring-3 I/O port write.
712 *
713 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
714 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
715 *
716 * @returns Strict VBox status code.
717 * @param pVM The cross context VM structure.
718 * @param pVCpu The cross context virtual CPU structure.
719 */
720VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
721{
722 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
723
724 /* Get and clear the pending data. */
725 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
726 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
727 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
728 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
729 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
730
731 /* Assert sanity. */
732 switch (cbValue)
733 {
734 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
735 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
736 case 4: break;
737 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
738 }
739 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
740
741 /* Do the work.*/
742 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
743 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
744 if (IOM_SUCCESS(rcStrict))
745 {
746 pVCpu->cpum.GstCtx.rip += cbInstr;
747 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
748 }
749 return rcStrict;
750}
751
752
753/**
754 * Handle pending ring-3 I/O port read.
755 *
756 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
757 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
758 *
759 * @returns Strict VBox status code.
760 * @param pVM The cross context VM structure.
761 * @param pVCpu The cross context virtual CPU structure.
762 */
763VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
764{
765 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
766
767 /* Get and clear the pending data. */
768 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
769 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
770 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
771 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
772
773 /* Assert sanity. */
774 switch (cbValue)
775 {
776 case 1: break;
777 case 2: break;
778 case 4: break;
779 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
780 }
781 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
782 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
783
784 /* Do the work.*/
785 uint32_t uValue = 0;
786 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
787 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
788 if (IOM_SUCCESS(rcStrict))
789 {
790 if (cbValue == 4)
791 pVCpu->cpum.GstCtx.rax = uValue;
792 else if (cbValue == 2)
793 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
794 else
795 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
796 pVCpu->cpum.GstCtx.rip += cbInstr;
797 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
798 }
799 return rcStrict;
800}
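/* Editor's note: the size-dependent RAX update above mirrors x86 register semantics --
   8-bit and 16-bit IN results only touch AL/AX and leave the upper bits of RAX alone,
   while for the 32-bit case the plain assignment to rax zero-extends the value, matching
   what a 32-bit write to EAX does on real hardware. */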
801
802
803/**
804 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
805 * Worker for emR3ExecuteSplitLockInstruction}
806 */
807static DECLCALLBACK(VBOXSTRICTRC) emR3ExecuteSplitLockInstructionRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
808{
809 /* Only execute on the specified EMT. */
810 if (pVCpu == (PVMCPU)pvUser)
811 {
812 LogFunc(("\n"));
813 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
814 LogFunc(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
815 if (rcStrict == VINF_IEM_RAISED_XCPT)
816 rcStrict = VINF_SUCCESS;
817 return rcStrict;
818 }
819 RT_NOREF(pVM);
820 return VINF_SUCCESS;
821}
822
823
824/**
825 * Handle an instruction causing a split cacheline lock access in SMP VMs.
826 *
827 * Generally we only get here if the host has split-lock detection enabled and
828 * this caused an \#AC because of something the guest did. If we interpret the
829 * instruction as-is, we'll likely just repeat the split-lock access and
830 * possibly be killed, get a SIGBUS, or trigger a warning followed by extra MSR
831 * changes on context switching (costs a tiny bit). Assuming these \#ACs are
832 * rare to non-existing, we'll do a rendezvous of all EMTs and tell IEM to
833 * disregard the lock prefix when emulating the instruction.
834 *
835 * Yes, we could probably modify the MSR (or MSRs) controlling the detection
836 * feature when entering guest context, but the support for the feature isn't a
837 * 100% given and we'll need the debug-only supdrvOSMsrProberRead and
838 * supdrvOSMsrProberWrite functionality from SUPDrv.cpp to safely detect it.
839 * Thus the approach is to just deal with the spurious \#ACs first and maybe add
840 * proper detection to SUPDrv later if we find it necessary.
841 *
842 * @see @bugref{10052}
843 *
844 * @returns Strict VBox status code.
845 * @param pVM The cross context VM structure.
846 * @param pVCpu The cross context virtual CPU structure.
847 */
848VBOXSTRICTRC emR3ExecuteSplitLockInstruction(PVM pVM, PVMCPU pVCpu)
849{
850 LogFunc(("\n"));
851 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
852}
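#if 0 /* Editor's illustration -- not VMM code.  Guest-side code of the kind that provokes
         the #AC handled above: a LOCK prefixed read-modify-write that straddles a 64 byte
         cache line while host split-lock detection is enabled. */
static void exampleGuestSplitLockAccess(void)
{
    static uint8_t s_abBuf[192];
    /* Pick an address two bytes short of a cache line boundary inside the buffer... */
    uintptr_t const uAddr = (((uintptr_t)&s_abBuf[64]) & ~(uintptr_t)63) + 62;
    /* ...so this 4-byte atomic add (a LOCK ADD) crosses the boundary and raises #AC. */
    ASMAtomicAddU32((uint32_t volatile *)uAddr, 1);
}
#endif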
853
854#endif /* VBOX_VMM_TARGET_X86 */
855
856/**
857 * Debug loop.
858 *
859 * @returns VBox status code for EM.
860 * @param pVM The cross context VM structure.
861 * @param pVCpu The cross context virtual CPU structure.
862 * @param rc Current EM VBox status code.
863 */
864static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
865{
866 for (;;)
867 {
868 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
869 const VBOXSTRICTRC rcLast = rc;
870
871 /*
872 * Debug related RC.
873 */
874 switch (VBOXSTRICTRC_VAL(rc))
875 {
876 /*
877 * Single step an instruction.
878 */
879 case VINF_EM_DBG_STEP:
880 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
881 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
882 AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
883#ifdef VBOX_WITH_HWVIRT
884 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
885 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
886#endif
887 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
888 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
889 else
890 {
891#if defined(VBOX_VMM_TARGET_X86) /** @todo IEM/arm64 */
892 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
893 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
894 rc = VINF_EM_DBG_STEPPED;
895#else
896 AssertFailed();
897 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
898#endif
899 }
900
901#ifdef VBOX_VMM_TARGET_X86
902 if (rc != VINF_EM_EMULATE_SPLIT_LOCK)
903 { /* likely */ }
904 else
905 {
906 rc = emR3ExecuteSplitLockInstruction(pVM, pVCpu);
907 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
908 rc = VINF_EM_DBG_STEPPED;
909 }
910#endif
911 break;
912
913 /*
914 * Simple events: stepped, breakpoint, stop/assertion.
915 */
916 case VINF_EM_DBG_STEPPED:
917 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
918 break;
919
920 case VINF_EM_DBG_BREAKPOINT:
921 rc = DBGFR3BpHit(pVM, pVCpu);
922 break;
923
924 case VINF_EM_DBG_STOP:
925 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
926 break;
927
928 case VINF_EM_DBG_EVENT:
929 rc = DBGFR3EventHandlePending(pVM, pVCpu);
930 break;
931
932 case VINF_EM_DBG_HYPER_STEPPED:
933 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
934 break;
935
936 case VINF_EM_DBG_HYPER_BREAKPOINT:
937 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
938 break;
939
940 case VINF_EM_DBG_HYPER_ASSERTION:
941 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
942 RTLogFlush(NULL);
943 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
944 break;
945
946 /*
947 * Guru meditation.
948 */
949 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
950 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
951 break;
952 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
953 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
954 break;
955
956 default: /** @todo don't use default for guru, but make special errors code! */
957 {
958 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
959 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
960 break;
961 }
962 }
963
964 /*
965 * Process the result.
966 */
967 switch (VBOXSTRICTRC_VAL(rc))
968 {
969 /*
970 * Continue the debugging loop.
971 */
972 case VINF_EM_DBG_STEP:
973 case VINF_EM_DBG_STOP:
974 case VINF_EM_DBG_EVENT:
975 case VINF_EM_DBG_STEPPED:
976 case VINF_EM_DBG_BREAKPOINT:
977 case VINF_EM_DBG_HYPER_STEPPED:
978 case VINF_EM_DBG_HYPER_BREAKPOINT:
979 case VINF_EM_DBG_HYPER_ASSERTION:
980 break;
981
982 /*
983 * Resuming execution (in some form) has to be done here if we got
984 * a hypervisor debug event.
985 */
986 case VINF_SUCCESS:
987 case VINF_EM_RESUME:
988 case VINF_EM_SUSPEND:
989 case VINF_EM_RESCHEDULE:
990 case VINF_EM_RESCHEDULE_REM:
991 case VINF_EM_HALT:
992 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
993 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
994 if (rc == VINF_SUCCESS)
995 rc = VINF_EM_RESCHEDULE;
996 return rc;
997
998 /*
999 * The debugger isn't attached.
1000 * We'll simply turn the thing off since that's the easiest thing to do.
1001 */
1002 case VERR_DBGF_NOT_ATTACHED:
1003 switch (VBOXSTRICTRC_VAL(rcLast))
1004 {
1005 case VINF_EM_DBG_HYPER_STEPPED:
1006 case VINF_EM_DBG_HYPER_BREAKPOINT:
1007 case VINF_EM_DBG_HYPER_ASSERTION:
1008 case VERR_TRPM_PANIC:
1009 case VERR_TRPM_DONT_PANIC:
1010 case VERR_VMM_RING0_ASSERTION:
1011 case VERR_VMM_HYPER_CR3_MISMATCH:
1012 case VERR_VMM_RING3_CALL_DISABLED:
1013 return rcLast;
1014 }
1015 return VINF_EM_OFF;
1016
1017 /*
1018 * Status codes terminating the VM in one or another sense.
1019 */
1020 case VINF_EM_TERMINATE:
1021 case VINF_EM_OFF:
1022 case VINF_EM_RESET:
1023 case VINF_EM_NO_MEMORY:
1024 case VINF_EM_RAW_STALE_SELECTOR:
1025 case VINF_EM_RAW_IRET_TRAP:
1026 case VERR_TRPM_PANIC:
1027 case VERR_TRPM_DONT_PANIC:
1028 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
1029 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
1030 case VERR_VMM_RING0_ASSERTION:
1031 case VERR_VMM_HYPER_CR3_MISMATCH:
1032 case VERR_VMM_RING3_CALL_DISABLED:
1033 case VERR_INTERNAL_ERROR:
1034 case VERR_INTERNAL_ERROR_2:
1035 case VERR_INTERNAL_ERROR_3:
1036 case VERR_INTERNAL_ERROR_4:
1037 case VERR_INTERNAL_ERROR_5:
1038 case VERR_IPE_UNEXPECTED_STATUS:
1039 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1040 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1041 return rc;
1042
1043 /*
1044 * The rest is unexpected, and will keep us here.
1045 */
1046 default:
1047 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1048 break;
1049 }
1050 } /* debug for ever */
1051}
1052
1053
1054/**
1055 * Executes recompiled code.
1056 *
1057 * This function contains the recompiler version of the inner
1058 * execution loop (the outer loop being in EMR3ExecuteVM()).
1059 *
1060 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1061 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1062 *
1063 * @param pVM The cross context VM structure.
1064 * @param pVCpu The cross context virtual CPU structure.
1065 * @param fWasHalted Set if we're coming out of a CPU HALT state.
1066 * @param pfFFDone Where to store an indicator telling whether or not
1067 * FFs were done before returning.
1068 *
1069 */
1070static VBOXSTRICTRC emR3RecompilerExecute(PVM pVM, PVMCPU pVCpu, bool fWasHalted, bool *pfFFDone)
1071{
1072 STAM_REL_PROFILE_START(&pVCpu->em.s.StatREMTotal, a);
1073#ifdef VBOX_VMM_TARGET_ARMV8
1074 LogFlow(("emR3RecompilerExecute/%u: (pc=%RGv)\n", pVCpu->idCpu, (RTGCPTR)pVCpu->cpum.GstCtx.Pc.u64));
1075#elif defined(VBOX_VMM_TARGET_X86)
1076 LogFlow(("emR3RecompilerExecute/%u: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));
1077#else
1078# error "port me"
1079#endif
1080
1081 /*
1082 * Loop till we get a forced action which returns anything but VINF_SUCCESS.
1083 */
1084 *pfFFDone = false;
1085 VBOXSTRICTRC rcStrict = VINF_SUCCESS;
1086 for (;;)
1087 {
1088#ifdef LOG_ENABLED
1089# if defined(VBOX_VMM_TARGET_ARMV8)
1090 Log3(("EM: pc=%08RX64\n", CPUMGetGuestFlatPC(pVCpu)));
1091# elif defined(VBOX_VMM_TARGET_X86)
1092 if (!pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1093 Log(("EMR%d: %04X:%08RX64 RSP=%08RX64 IF=%d CR0=%x eflags=%x\n", CPUMGetGuestCPL(pVCpu), pVCpu->cpum.GstCtx.cs.Sel,
1094 pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF,
1095 (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1096 else
1097 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1098# else
1099# error "port me"
1100# endif
1101#endif
1102
1103 /*
1104 * Execute.
1105 */
1106 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1107 {
1108 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1109#ifdef VBOX_WITH_IEM_RECOMPILER
1110 if (pVM->em.s.fIemRecompiled)
1111 rcStrict = IEMExecRecompiler(pVM, pVCpu, fWasHalted);
1112 else
1113#endif
1114 rcStrict = IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/);
1115 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1116 }
1117 else
1118 {
1119 /* Give up this time slice; virtual time continues */
1120 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1121 RTThreadSleep(5);
1122 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1123 rcStrict = VINF_SUCCESS;
1124 }
1125
1126 /*
1127 * Deal with high priority post execution FFs before doing anything
1128 * else. Sync back the state and leave the lock to be on the safe side.
1129 */
1130 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1131 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1132 rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, rcStrict);
1133
1134 /*
1135 * Process the returned status code.
1136 */
1137 if (rcStrict != VINF_SUCCESS)
1138 {
1139#ifndef VBOX_VMM_TARGET_ARMV8
1140 if (rcStrict == VINF_EM_EMULATE_SPLIT_LOCK)
1141 rcStrict = emR3ExecuteSplitLockInstruction(pVM, pVCpu);
1142#endif
1143 if (rcStrict != VINF_SUCCESS)
1144 {
1145#if 0
1146 if (RT_LIKELY(rcStrict >= VINF_EM_FIRST && rcStrict <= VINF_EM_LAST))
1147 break;
1148 /* Fatal error: */
1149#endif
1150 break;
1151 }
1152 }
1153
1154
1155 /*
1156 * Check and execute forced actions.
1157 *
1158 * Sync back the VM state and leave the lock before calling any of
1159 * these, you never know what's going to happen here.
1160 */
1161#ifdef VBOX_HIGH_RES_TIMERS_HACK
1162 TMTimerPollVoid(pVM, pVCpu);
1163#endif
1164 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1165 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1166 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1167 {
1168 rcStrict = emR3ForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
1169 VBOXVMM_EM_FF_ALL_RET(pVCpu, VBOXSTRICTRC_VAL(rcStrict));
1170 if ( rcStrict != VINF_SUCCESS
1171 && rcStrict != VINF_EM_RESCHEDULE_REM)
1172 {
1173 *pfFFDone = true;
1174 break;
1175 }
1176 }
1177
1178 /*
1179 * Check if we can switch back to the main execution engine now.
1180 */
1181#ifdef VBOX_WITH_HWVIRT
1182 if (VM_IS_HM_ENABLED(pVM))
1183 {
1184 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1185 {
1186 *pfFFDone = true;
1187 rcStrict = VINF_EM_RESCHEDULE_EXEC_ENGINE;
1188 break;
1189 }
1190 }
1191 else
1192#endif
1193 if (VM_IS_NEM_ENABLED(pVM))
1194 {
1195 if (NEMR3CanExecuteGuest(pVM, pVCpu))
1196 {
1197 *pfFFDone = true;
1198 rcStrict = VINF_EM_RESCHEDULE_EXEC_ENGINE;
1199 break;
1200 }
1201 }
1202
1203#ifdef VBOX_WITH_IEM_RECOMPILER
1204 fWasHalted = false;
1205#else
1206 RT_NOREF(fWasHalted);
1207#endif
1208 } /* The Inner Loop, recompiled execution mode version. */
1209
1210 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatREMTotal, a);
1211 return rcStrict;
1212}
1213
1214
1215/**
1216 * Decides whether to execute HM, NEM, IEM/interpreter or IEM/recompiler.
1217 *
1218 * @returns new EM state
1219 * @param pVM The cross context VM structure.
1220 * @param pVCpu The cross context virtual CPU structure.
1221 */
1222EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1223{
1224 /*
1225 * We stay in the wait for SIPI state unless explicitly told otherwise.
1226 */
1227 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1228 return EMSTATE_WAIT_SIPI;
1229
1230 /*
1231 * Can we use the default engine. IEM is the fallback.
1232 */
1233 if (!pVM->em.s.fIemExecutesAll)
1234 {
1235 switch (pVM->bMainExecutionEngine)
1236 {
1237#ifdef VBOX_WITH_HWVIRT
1238 case VM_EXEC_ENGINE_HW_VIRT:
1239 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1240 return EMSTATE_HM;
1241 break;
1242#endif
1243#ifdef VBOX_WITH_NATIVE_NEM
1244 case VM_EXEC_ENGINE_NATIVE_API:
1245 if (NEMR3CanExecuteGuest(pVM, pVCpu))
1246 return EMSTATE_NEM;
1247 break;
1248#endif
1249 case VM_EXEC_ENGINE_IEM:
1250 break;
1251 default:
1252 AssertMsgFailed(("bMainExecutionEngine=%d\n", pVM->bMainExecutionEngine));
1253 break;
1254 }
1255 }
1256#ifdef VBOX_WITH_IEM_RECOMPILER
1257 return pVM->em.s.fIemRecompiled ? EMSTATE_RECOMPILER : EMSTATE_IEM;
1258#else
1259 return EMSTATE_IEM;
1260#endif
1261}
1262
1263
1264/**
1265 * Executes all high priority post execution force actions.
1266 *
1267 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1268 * fatal error status code.
1269 *
1270 * @param pVM The cross context VM structure.
1271 * @param pVCpu The cross context virtual CPU structure.
1272 * @param rc The current strict VBox status code rc.
1273 */
1274VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1275{
1276 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1277
1278 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1279 PDMCritSectBothFF(pVM, pVCpu);
1280
1281#ifdef VBOX_VMM_TARGET_X86
1282 /* Update CR3 (Nested Paging case for HM). */
1283 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1284 {
1285 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1286 int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1287 if (RT_FAILURE(rc2))
1288 return rc2;
1289 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1290 }
1291#endif
1292
1293 /* IEM has pending work (typically memory write after INS instruction). */
1294 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1295 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1296
1297 /* IOM has pending work (committing an I/O or MMIO write). */
1298 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1299 {
1300 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1301 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1302 { /* half likely, or at least it's a line shorter. */ }
1303 else if (rc == VINF_SUCCESS)
1304 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1305 else
1306 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1307 }
1308
1309 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1310 {
1311 if ( rc > VINF_EM_NO_MEMORY
1312 && rc <= VINF_EM_LAST)
1313 rc = VINF_EM_NO_MEMORY;
1314 }
1315
1316 return rc;
1317}
1318
1319#ifdef VBOX_VMM_TARGET_X86
1320
1321/**
1322 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1323 *
1324 * @returns VBox status code.
1325 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1326 * @param pVCpu The cross context virtual CPU structure.
1327 */
1328static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1329{
1330# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1331 /* Handle the "external interrupt" VM-exit intercept. */
1332 if ( CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
1333 && !CPUMIsGuestVmxExitCtlsSet(&pVCpu->cpum.GstCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
1334 {
1335 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1336 AssertMsg( rcStrict != VINF_VMX_VMEXIT /* VM-exit should have been converted to VINF_SUCCESS. */
1337 && rcStrict != VINF_NO_CHANGE
1338 && rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1339 return VBOXSTRICTRC_VAL(rcStrict);
1340 }
1341# else
1342 RT_NOREF(pVCpu);
1343# endif
1344 return VINF_NO_CHANGE;
1345}
1346
1347
1348/**
1349 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1350 *
1351 * @returns VBox status code.
1352 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1353 * @param pVCpu The cross context virtual CPU structure.
1354 */
1355static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1356{
1357# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1358 /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
1359 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1360 {
1361 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1362 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1363 if (RT_SUCCESS(rcStrict))
1364 {
1365 AssertMsg( rcStrict != VINF_SVM_VMEXIT
1366 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1367 return VBOXSTRICTRC_VAL(rcStrict);
1368 }
1369
1370 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1371 return VINF_EM_TRIPLE_FAULT;
1372 }
1373# else
1374 NOREF(pVCpu);
1375# endif
1376 return VINF_NO_CHANGE;
1377}
1378
1379
1380/**
1381 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1382 *
1383 * @returns VBox status code.
1384 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1385 * @param pVCpu The cross context virtual CPU structure.
1386 */
1387static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1388{
1389# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1390 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1391 {
1392 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1393 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1394 if (RT_SUCCESS(rcStrict))
1395 {
1396 Assert(rcStrict != VINF_SVM_VMEXIT);
1397 return VBOXSTRICTRC_VAL(rcStrict);
1398 }
1399 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1400 return VINF_EM_TRIPLE_FAULT;
1401 }
1402# else
1403 NOREF(pVCpu);
1404# endif
1405 return VINF_NO_CHANGE;
1406}
1407
1408#endif /* VBOX_VMM_TARGET_X86 */
1409
1410/**
1411 * Executes all pending forced actions.
1412 *
1413 * Forced actions can cause execution delays and execution
1414 * rescheduling. The first we deal with using action priority, so
1415 * that for instance pending timers aren't scheduled and run until
1416 * right before execution. The rescheduling we deal with using
1417 * return codes. The same goes for VM termination, only in that case
1418 * we exit everything.
1419 *
1420 * @returns VBox status code of equal or greater importance/severity than rc.
1421 * The most important ones are: VINF_EM_RESCHEDULE,
1422 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1423 *
1424 * @param pVM The cross context VM structure.
1425 * @param pVCpu The cross context virtual CPU structure.
1426 * @param rc The current rc.
1427 *
1428 */
1429int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1430{
1431 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1432#ifdef VBOX_STRICT
1433 int rcIrq = VINF_SUCCESS;
1434#endif
1435 int rc2;
1436#define UPDATE_RC() \
1437 do { \
1438 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1439 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1440 break; \
1441 if (!rc || rc2 < rc) \
1442 rc = rc2; \
1443 } while (0)
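/* Editor's note: UPDATE_RC() keeps the numerically smallest informational status, which in
   the VINF_EM_XXX range means the most urgent request wins (e.g. a pending VINF_EM_SUSPEND
   is not displaced by a later VINF_EM_RESCHEDULE), while an rc that is already an error
   (negative) is never overwritten by an informational rc2. */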
1444 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1445
1446 /*
1447 * Post execution chunk first.
1448 */
1449 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1450 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1451 {
1452 /*
1453 * EMT Rendezvous (must be serviced before termination).
1454 */
1455 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1456 {
1457 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1458 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1459 UPDATE_RC();
1460 /** @todo HACK ALERT! The following test is to make sure EM+TM
1461 * thinks the VM is stopped/reset before the next VM state change
1462 * is made. We need a better solution for this, or at least make it
1463 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1464 * VINF_EM_SUSPEND). */
1465 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1466 {
1467 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1468 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1469 return rc;
1470 }
1471 }
1472
1473 /*
1474 * State change request (cleared by vmR3SetStateLocked).
1475 */
1476 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1477 {
1478 VMSTATE enmState = VMR3GetState(pVM);
1479 switch (enmState)
1480 {
1481 case VMSTATE_FATAL_ERROR:
1482 case VMSTATE_FATAL_ERROR_LS:
1483 case VMSTATE_GURU_MEDITATION:
1484 case VMSTATE_GURU_MEDITATION_LS:
1485 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1486 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1487 return VINF_EM_SUSPEND;
1488
1489 case VMSTATE_DESTROYING:
1490 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1491 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1492 return VINF_EM_TERMINATE;
1493
1494 default:
1495 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1496 }
1497 }
1498
1499 /*
1500 * Debugger Facility polling.
1501 */
1502 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1503 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1504 {
1505 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1506 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1507 /** @todo why that VINF_EM_DBG_EVENT here? Duplicate info, should be handled
1508 * somewhere before we get here, I would think. */
1509 if (rc == VINF_EM_DBG_EVENT) /* HACK! We should've handled pending debug event. */
1510 rc = rc2;
1511 else
1512 UPDATE_RC();
1513 }
1514
1515 /*
1516 * Postponed reset request.
1517 */
1518 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1519 {
1520 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1521 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1522 UPDATE_RC();
1523 }
1524
1525#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
1526 /*
1527 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1528 */
1529 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1530 {
1531 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1532 UPDATE_RC();
1533 if (rc == VINF_EM_NO_MEMORY)
1534 return rc;
1535 }
1536#endif
1537
1538 /* check that we got them all */
1539 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1540 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1541 }
1542
1543 /*
1544 * Normal priority then.
1545 * (Executed in no particular order.)
1546 */
1547 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1548 {
1549 /*
1550 * PDM Queues are pending.
1551 */
1552 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1553 PDMR3QueueFlushAll(pVM);
1554
1555 /*
1556 * PDM DMA transfers are pending.
1557 */
1558 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1559 PDMR3DmaRun(pVM);
1560
1561 /*
1562 * EMT Rendezvous (make sure they are handled before the requests).
1563 */
1564 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1565 {
1566 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1567 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1568 UPDATE_RC();
1569 /** @todo HACK ALERT! The following test is to make sure EM+TM
1570 * thinks the VM is stopped/reset before the next VM state change
1571 * is made. We need a better solution for this, or at least make it
1572 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1573 * VINF_EM_SUSPEND). */
1574 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1575 {
1576 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1577 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1578 return rc;
1579 }
1580 }
1581
1582 /*
1583 * Requests from other threads.
1584 */
1585 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1586 {
1587 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1588 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1589 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1590 {
1591 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1592 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1593 return rc2;
1594 }
1595 UPDATE_RC();
1596 /** @todo HACK ALERT! The following test is to make sure EM+TM
1597 * thinks the VM is stopped/reset before the next VM state change
1598 * is made. We need a better solution for this, or at least make it
1599 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1600 * VINF_EM_SUSPEND). */
1601 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1602 {
1603 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1604 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1605 return rc;
1606 }
1607 }
1608
1609 /* check that we got them all */
1610 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS));
1611 }
1612
1613 /*
1614 * Normal priority then. (per-VCPU)
1615 * (Executed in no particular order.)
1616 */
1617 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1618 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1619 {
1620 /*
1621 * Requests from other threads.
1622 */
1623 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1624 {
1625 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1626 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1627 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1628 {
1629 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1630 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1631 return rc2;
1632 }
1633 UPDATE_RC();
1634 /** @todo HACK ALERT! The following test is to make sure EM+TM
1635 * thinks the VM is stopped/reset before the next VM state change
1636 * is made. We need a better solution for this, or at least make it
1637 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1638 * VINF_EM_SUSPEND). */
1639 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1640 {
1641 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1642 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1643 return rc;
1644 }
1645 }
1646
1647 /* check that we got them all */
1648 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1649 }
1650
1651 /*
1652 * High priority pre execution chunk last.
1653 * (Executed in ascending priority order.)
1654 */
1655 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1656 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1657 {
1658 /*
1659 * Timers before interrupts.
1660 */
1661 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1662 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1663 TMR3TimerQueuesDo(pVM);
1664
1665#ifdef VBOX_VMM_TARGET_X86
1666 /*
1667 * Pick up asynchronously posted interrupts into the APIC.
1668 */
1669 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1670 PDMApicUpdatePendingInterrupts(pVCpu);
1671
1672 /*
1673 * The instruction following an emulated STI should *always* be executed!
1674 *
1675 * Note! We intentionally don't clear CPUMCTX_INHIBIT_INT here if
1676 * the eip is the same as the inhibited instr address. Before we
1677 * are able to execute this instruction in raw mode (iret to
1678 * guest code) an external interrupt might force a world switch
1679 * again. Possibly allowing a guest interrupt to be dispatched
1680 * in the process. This could break the guest. Sounds very
1681         * unlikely, but such timing sensitive problems are not as rare as
1682 * you might think.
1683 *
1684 * Note! This used to be a force action flag. Can probably ditch this code.
1685 */
1686 /** @todo r=bird: the clearing case will *never* be taken here as
1687 * CPUMIsInInterruptShadow already makes sure the RIPs matches. */
1688 if ( CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1689 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1690 {
1691 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_INHIBIT_INT);
1692 if (CPUMGetGuestRIP(pVCpu) != pVCpu->cpum.GstCtx.uRipInhibitInt)
1693 {
1694 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
1695 Log(("Clearing CPUMCTX_INHIBIT_INT at %RGv - successor %RGv\n",
1696 (RTGCPTR)CPUMGetGuestRIP(pVCpu), (RTGCPTR)pVCpu->cpum.GstCtx.uRipInhibitInt));
1697 }
1698 else
1699 Log(("Leaving CPUMCTX_INHIBIT_INT set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1700 }
1701
1702 /** @todo SMIs. If we implement SMIs, this is where they will have to be
1703 * delivered. */
1704
1705# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1706 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER
1707 | VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW))
1708 {
1709 /*
1710 * VMX Nested-guest APIC-write pending (can cause VM-exits).
1711 * Takes priority over even SMI and INIT signals.
1712 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
1713 */
1714 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
1715 {
1716 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
1717 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1718 UPDATE_RC();
1719 }
1720
1721 /*
1722             * APIC write emulation MAY have caused a VM-exit.
1723 * If it did cause a VM-exit, there's no point checking the other VMX non-root mode FFs here.
1724 */
1725 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
1726 {
1727 /*
1728 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
1729 * Takes priority over "Traps on the previous instruction".
1730 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
1731 */
1732 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
1733 {
1734 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
1735 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1736 UPDATE_RC();
1737 }
1738 /*
1739 * VMX Nested-guest preemption timer VM-exit.
1740 * Takes priority over NMI-window VM-exits.
1741 */
1742 else if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
1743 {
1744 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
1745 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1746 UPDATE_RC();
1747 }
1748 /*
1749 * VMX interrupt-window and NMI-window VM-exits.
1750 * Takes priority over non-maskable interrupts (NMIs) and external interrupts respectively.
1751                 * If we are in an interrupt shadow or if we are already in the process of delivering
1752 * an event then these VM-exits cannot occur.
1753 *
1754 * Interrupt shadows block NMI-window VM-exits.
1755 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
1756 *
1757 * See Intel spec. 25.2 "Other Causes Of VM Exits".
1758 * See Intel spec. 26.7.6 "NMI-Window Exiting".
1759 * See Intel spec. 6.7 "Nonmaskable Interrupt (NMI)".
1760 */
1761 else if ( !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1762 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx)
1763 && !TRPMHasTrap(pVCpu))
1764 {
1765 /*
1766 * VMX NMI-window VM-exit.
1767 */
1768 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1769 && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1770 {
1771 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
1772 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1773 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
1774 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1775 && rc2 != VINF_VMX_VMEXIT
1776 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1777 UPDATE_RC();
1778 }
1779 /*
1780 * VMX interrupt-window VM-exit.
1781 * This is a bit messy with the way the code below is currently structured,
1782 * but checking VMCPU_FF_INTERRUPT_NMI here (combined with CPUMAreInterruptsInhibitedByNmi
1783 * already checked at this point) should allow a pending NMI to be delivered prior to
1784 * causing an interrupt-window VM-exit.
1785 */
1786 /** @todo Restructure this later to happen after injecting NMI/causing NMI-exit, see
1787 * code in VMX R0 event delivery. */
1788 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
1789 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1790 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1791 {
1792 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
1793 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1794 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
1795 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1796 && rc2 != VINF_VMX_VMEXIT
1797 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1798 UPDATE_RC();
1799 }
1800 }
1801 }
1802
1803 /*
1804 * Interrupt-window and NMI-window force flags might still be pending if we didn't actually cause
1805 * a VM-exit above. They will get cleared eventually when ANY nested-guest VM-exit occurs.
1806 * However, the force flags asserted below MUST have been cleared at this point.
1807 */
1808 Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER));
1809 }
1810# endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1811
1812 /*
1813 * Guest event injection.
1814 */
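        /* Note: the injection below is only attempted when rc is VINF_SUCCESS or when
           nothing more urgent than a reschedule status is pending (see the rc check
           in the condition below). */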
1815 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
1816 bool fWakeupPending = false;
1817 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW
1818 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_NESTED_GUEST
1819 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1820 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1821 && (!rc || rc >= VINF_EM_RESCHEDULE_EXEC_ENGINE)
1822 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx) /* Interrupt shadows block both NMIs and interrupts. */
1823 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
1824 {
1825 if (CPUMGetGuestGif(&pVCpu->cpum.GstCtx))
1826 {
1827 bool fInVmxNonRootMode;
1828 bool fInSvmHwvirtMode;
1829 if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx))
1830 {
1831 fInVmxNonRootMode = false;
1832 fInSvmHwvirtMode = false;
1833 }
1834 else
1835 {
1836 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1837 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1838 }
1839
1840 /*
1841 * NMIs (take priority over external interrupts).
1842 */
1843 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1844 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
1845 {
1846# ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1847 if ( fInVmxNonRootMode
1848 && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
1849 {
1850 /* We MUST clear the NMI force-flag here, see @bugref{10318#c19}. */
1851 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1852 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
1853 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1854 UPDATE_RC();
1855 }
1856 else
1857# endif
1858# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1859 if ( fInSvmHwvirtMode
1860 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
1861 {
1862 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
1863 AssertMsg( rc2 != VINF_SVM_VMEXIT
1864 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1865 UPDATE_RC();
1866 }
1867 else
1868# endif
1869 {
1870 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_NMI);
1871 if (rc2 == VINF_SUCCESS)
1872 {
1873 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1874 fWakeupPending = true;
1875# if 0 /* HMR3IsActive is not reliable (esp. after restore), just return VINF_EM_RESCHEDULE. */
1876 if (pVM->em.s.fIemExecutesAll)
1877 rc2 = VINF_EM_RESCHEDULE;
1878 else
1879 {
1880 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
1881 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
1882 : VINF_EM_RESCHEDULE_REM;
1883 }
1884# else
1885 rc2 = VINF_EM_RESCHEDULE;
1886# endif
1887 }
1888 UPDATE_RC();
1889 }
1890 }
1891# ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1892 /** @todo NSTSVM: Handle this for SVM here too later not when an interrupt is
1893 * actually pending like we currently do. */
1894# endif
1895 /*
1896 * External interrupts.
1897 */
1898 else
1899 {
1900 /*
1901                     * VMX: virtual interrupts take priority over physical interrupts.
1902                     * SVM: physical interrupts take priority over virtual interrupts.
1903 */
1904 if ( fInVmxNonRootMode
1905 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1906 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1907 {
1908 /** @todo NSTVMX: virtual-interrupt delivery. */
1909 rc2 = VINF_SUCCESS;
1910 }
1911 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1912 && CPUMIsGuestPhysIntrEnabled(pVCpu))
1913 {
1914 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1915 if (fInVmxNonRootMode)
1916 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
1917 else if (fInSvmHwvirtMode)
1918 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
1919 else
1920 rc2 = VINF_NO_CHANGE;
1921
1922 if (rc2 == VINF_NO_CHANGE)
1923 {
1924 bool fInjected = false;
1925 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1926 /** @todo this really isn't nice, should properly handle this */
1927 /* Note! This can still cause a VM-exit (on Intel). */
1928 LogFlow(("Calling TRPMR3InjectEvent: %04x:%08RX64 efl=%#x\n",
1929 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
1930 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
1931 fWakeupPending = true;
1932 if ( pVM->em.s.fIemExecutesAll
1933 && ( rc2 == VINF_EM_RESCHEDULE_REM
1934 || rc2 == VINF_EM_RESCHEDULE_EXEC_ENGINE))
1935 rc2 = VINF_EM_RESCHEDULE;
1936# ifdef VBOX_STRICT
1937 if (fInjected)
1938 rcIrq = rc2;
1939# endif
1940 }
1941 UPDATE_RC();
1942 }
1943 else if ( fInSvmHwvirtMode
1944 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1945 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
1946 {
1947 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
1948 if (rc2 == VINF_NO_CHANGE)
1949 {
1950 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1951 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
1952 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
1953 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1954 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1955 rc2 = VINF_EM_RESCHEDULE;
1956# ifdef VBOX_STRICT
1957 rcIrq = rc2;
1958# endif
1959 }
1960 UPDATE_RC();
1961 }
1962 }
1963 } /* CPUMGetGuestGif */
1964 }
1965
1966#else /* VBOX_VMM_TARGET_ARMV8 */
1967 bool fWakeupPending = false;
1968 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VTIMER_ACTIVATED))
1969 {
1970 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_VTIMER_ACTIVATED);
1971
1972 fWakeupPending = true;
1973 rc2 = VINF_EM_RESCHEDULE;
1974 UPDATE_RC();
1975 }
1976#endif /* VBOX_VMM_TARGET_ARMV8 */
1977
1978#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
1979 /*
1980 * Allocate handy pages.
1981 */
1982 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1983 {
1984 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1985 UPDATE_RC();
1986 }
1987#endif
1988
1989 /*
1990 * Debugger Facility request.
1991 */
1992 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1993 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1994 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
1995 {
1996 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1997 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1998 UPDATE_RC();
1999 }
2000
2001 /*
2002 * EMT Rendezvous (must be serviced before termination).
2003 */
2004 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2005 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2006 {
2007 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2008 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2009 UPDATE_RC();
2010 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2011 * stopped/reset before the next VM state change is made. We need a better
2012 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2013             * && rc <= VINF_EM_SUSPEND). */
2014 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2015 {
2016 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2017 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2018 return rc;
2019 }
2020 }
2021
2022 /*
2023 * State change request (cleared by vmR3SetStateLocked).
2024 */
2025 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2026 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2027 {
2028 VMSTATE enmState = VMR3GetState(pVM);
2029 switch (enmState)
2030 {
2031 case VMSTATE_FATAL_ERROR:
2032 case VMSTATE_FATAL_ERROR_LS:
2033 case VMSTATE_GURU_MEDITATION:
2034 case VMSTATE_GURU_MEDITATION_LS:
2035 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2036 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2037 return VINF_EM_SUSPEND;
2038
2039 case VMSTATE_DESTROYING:
2040 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2041 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2042 return VINF_EM_TERMINATE;
2043
2044 default:
2045 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2046 }
2047 }
2048
2049 /*
2050 * Out of memory? Since most of our fellow high priority actions may cause us
2051 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2052 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2053 * than us since we can terminate without allocating more memory.
2054 */
2055 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2056 {
2057#ifndef VBOX_WITH_ONLY_PGM_NEM_MODE
2058 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2059#else
2060 rc2 = VINF_EM_NO_MEMORY;
2061#endif
2062 UPDATE_RC();
2063 if (rc == VINF_EM_NO_MEMORY)
2064 return rc;
2065 }
2066
2067 /*
2068 * If the virtual sync clock is still stopped, make TM restart it.
2069 */
2070 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2071 TMR3VirtualSyncFF(pVM, pVCpu);
2072
2073#ifdef DEBUG
2074 /*
2075 * Debug, pause the VM.
2076 */
2077 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2078 {
2079 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2080 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2081 return VINF_EM_SUSPEND;
2082 }
2083#endif
2084
2085 /* check that we got them all */
2086 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2087#ifdef VBOX_VMM_TARGET_ARMV8
2088 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_IRQ | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_DBGF));
2089#elif defined(VBOX_VMM_TARGET_X86)
2090 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2091#else
2092# error "port me"
2093#endif
2094 }
2095
2096#undef UPDATE_RC
2097 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2098 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2099 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2100 return rc;
2101}
2102
2103
2104/**
2105 * Check if the preset execution time cap restricts guest execution scheduling.
2106 *
2107 * @returns true if allowed, false otherwise
2108 * @param pVM The cross context VM structure.
2109 * @param pVCpu The cross context virtual CPU structure.
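 *
 * @par Worked example
 * A rough sketch of the arithmetic below, assuming an illustrative
 * EM_TIME_SLICE of 100 ms and uCpuExecutionCap set to 50:
 * @code
 *      // Per-slice budget: (EM_TIME_SLICE * uCpuExecutionCap) / 100 = 50 ms of
 *      // combined kernel+user CPU time.  While cMsTimeSliceExec stays below
 *      // this budget the function returns true; once it is exceeded it keeps
 *      // returning false until msTimeSliceStart + EM_TIME_SLICE has passed and
 *      // a fresh slice is started.
 * @endcode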
2110 */
2111bool emR3IsExecutionAllowedSlow(PVM pVM, PVMCPU pVCpu)
2112{
2113 Assert(pVM->uCpuExecutionCap != 100);
2114 uint64_t cMsUserTime;
2115 uint64_t cMsKernelTime;
2116 if (RT_SUCCESS(RTThreadGetExecutionTimeMilli(&cMsKernelTime, &cMsUserTime)))
2117 {
2118 uint64_t const msTimeNow = RTTimeMilliTS();
2119 if (pVCpu->em.s.msTimeSliceStart + EM_TIME_SLICE < msTimeNow)
2120 {
2121 /* New time slice. */
2122 pVCpu->em.s.msTimeSliceStart = msTimeNow;
2123 pVCpu->em.s.cMsTimeSliceStartExec = cMsKernelTime + cMsUserTime;
2124 pVCpu->em.s.cMsTimeSliceExec = 0;
2125 }
2126 pVCpu->em.s.cMsTimeSliceExec = cMsKernelTime + cMsUserTime - pVCpu->em.s.cMsTimeSliceStartExec;
2127
2128 bool const fRet = pVCpu->em.s.cMsTimeSliceExec < (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100;
2129 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.msTimeSliceStart,
2130 pVCpu->em.s.cMsTimeSliceStartExec, pVCpu->em.s.cMsTimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2131 return fRet;
2132 }
2133 return true;
2134}
2135
2136
2137/**
2138 * Execute VM.
2139 *
2140 * This function is the main loop of the VM. The emulation thread
2141 * calls this function when the VM has been successfully constructed
2142 * and we're ready to execute the VM.
2143 *
2144 * Returning from this function means that the VM is turned off or
2145 * suspended (state already saved) and deconstruction is next in line.
2146 *
2147 * All interaction from other threads is done using forced actions
2148 * and signalling of the wait object.
2149 *
2150 * @returns VBox status code, informational status codes may indicate failure.
2151 * @param pVM The cross context VM structure.
2152 * @param pVCpu The cross context virtual CPU structure.
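 *
 * @par Outer loop sketch
 * A much simplified sketch of the loop below, for orientation only; the real
 * code handles many more status codes and execution states:
 * @code
 *      for (;;)
 *      {
 *          // 1. Service pending forced actions (may return OFF/TERMINATE/SUSPEND).
 *          rc = emR3ForcedActions(pVM, pVCpu, rc);
 *          // 2. Map rc to the next execution state (HM, NEM, RECOMPILER, IEM,
 *          //    HALTED, WAIT_SIPI, SUSPENDED, DEBUG_*, GURU_MEDITATION, ...).
 *          // 3. Run the matching inner loop or handler until it requests a
 *          //    reschedule, or return when the VM is powered off, terminated,
 *          //    suspended or hits a fatal error.
 *      }
 * @endcode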
2153 */
2154VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2155{
2156 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2157 pVM,
2158 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2159 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2160 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2161 VM_ASSERT_EMT(pVM);
2162 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2163 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2164 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2165 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2166
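    /* Fatal errors deep down in the execution paths longjmp back to this point;
       the resulting non-zero rc is handled by the guru meditation code in the
       else branch at the bottom of this function. */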
2167 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2168 if (rc == 0)
2169 {
2170 /*
2171 * Start the virtual time.
2172 */
2173 TMR3NotifyResume(pVM, pVCpu);
2174
2175 /*
2176 * The Outer Main Loop.
2177 */
2178 bool fFFDone = false;
2179
2180 /* Reschedule right away to start in the right state. */
2181 rc = VINF_SUCCESS;
2182
2183 /* If resuming after a pause or a state load, restore the previous
2184 state or else we'll start executing code. Else, just reschedule. */
2185 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2186 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2187 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2188 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2189 else
2190 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2191 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2192
2193 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2194 for (;;)
2195 {
2196 /*
2197 * Before we can schedule anything (we're here because
2198 * scheduling is required) we must service any pending
2199 * forced actions to avoid any pending action causing
2200             * immediate rescheduling upon entering an inner loop.
2201 *
2202 * Do forced actions.
2203 */
2204 if ( !fFFDone
2205 && RT_SUCCESS(rc)
2206 && rc != VINF_EM_TERMINATE
2207 && rc != VINF_EM_OFF
2208 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2209 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2210 {
2211 rc = emR3ForcedActions(pVM, pVCpu, rc);
2212 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2213 }
2214 else if (fFFDone)
2215 fFFDone = false;
2216
2217#if defined(VBOX_STRICT) && defined(VBOX_VMM_TARGET_X86)
2218 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
2219#endif
2220
2221 /*
2222 * Now what to do?
2223 */
2224 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2225 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2226 switch (rc)
2227 {
2228 /*
2229 * Keep doing what we're currently doing.
2230 */
2231 case VINF_SUCCESS:
2232 break;
2233
2234 /*
2235 * Reschedule - to main execution engine (HM, NEM, IEM/REM).
2236 */
2237 case VINF_EM_RESCHEDULE_EXEC_ENGINE:
2238 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2239 if (!pVM->em.s.fIemExecutesAll)
2240 {
2241#ifdef VBOX_WITH_HWVIRT
2242 if (VM_IS_HM_ENABLED(pVM))
2243 {
2244 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
2245 {
2246 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2247 pVCpu->em.s.enmState = EMSTATE_HM;
2248 break;
2249 }
2250 }
2251 else
2252#endif
2253 if (VM_IS_NEM_ENABLED(pVM) && NEMR3CanExecuteGuest(pVM, pVCpu))
2254 {
2255 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2256 pVCpu->em.s.enmState = EMSTATE_NEM;
2257 break;
2258 }
2259 }
2260
2261 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_EXEC_ENGINE: %d -> %d (EMSTATE_RECOMPILER)\n", enmOldState, EMSTATE_RECOMPILER));
2262 pVCpu->em.s.enmState = EMSTATE_RECOMPILER;
2263 break;
2264
2265 /*
2266 * Reschedule - to recompiled execution.
2267 */
2268 case VINF_EM_RESCHEDULE_REM:
2269 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2270                 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_RECOMPILER)\n",
2271 enmOldState, EMSTATE_RECOMPILER));
2272 pVCpu->em.s.enmState = EMSTATE_RECOMPILER;
2273 break;
2274
2275 /*
2276 * Resume.
2277 */
2278 case VINF_EM_RESUME:
2279 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2280 /* Don't reschedule in the halted or wait-for-SIPI cases. */
2281 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2282 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2283 {
2284 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2285 break;
2286 }
2287 /* fall through and get scheduled. */
2288 RT_FALL_THRU();
2289
2290 /*
2291 * Reschedule.
2292 */
2293 case VINF_EM_RESCHEDULE:
2294 {
2295 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2296 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2297 pVCpu->em.s.enmState = enmState;
2298 break;
2299 }
2300
2301 /*
2302 * Halted.
2303 */
2304 case VINF_EM_HALT:
2305 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2306 pVCpu->em.s.enmState = EMSTATE_HALTED;
2307 break;
2308
2309 /*
2310 * Switch to the wait for SIPI state (application processor only)
2311 */
2312 case VINF_EM_WAIT_SIPI:
2313 Assert(pVCpu->idCpu != 0);
2314 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2315 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2316 break;
2317
2318
2319 /*
2320 * Suspend.
2321 */
2322 case VINF_EM_SUSPEND:
2323 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2324 Assert(enmOldState != EMSTATE_SUSPENDED);
2325 pVCpu->em.s.enmPrevState = enmOldState;
2326 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2327 break;
2328
2329 /*
2330 * Reset.
2331 * We might end up doing a double reset for now, we'll have to clean up the mess later.
2332 */
2333 case VINF_EM_RESET:
2334 {
2335 if (pVCpu->idCpu == 0)
2336 {
2337 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2338 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2339 pVCpu->em.s.enmState = enmState;
2340 }
2341 else
2342 {
2343 /* All other VCPUs go into the wait for SIPI state. */
2344 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2345 }
2346 break;
2347 }
2348
2349 /*
2350 * Power Off.
2351 */
2352 case VINF_EM_OFF:
2353 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2354 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2355 TMR3NotifySuspend(pVM, pVCpu);
2356 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2357 return rc;
2358
2359 /*
2360 * Terminate the VM.
2361 */
2362 case VINF_EM_TERMINATE:
2363 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2364 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2365 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2366 TMR3NotifySuspend(pVM, pVCpu);
2367 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2368 return rc;
2369
2370
2371 /*
2372 * Out of memory, suspend the VM and stuff.
2373 */
2374 case VINF_EM_NO_MEMORY:
2375 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2376 Assert(enmOldState != EMSTATE_SUSPENDED);
2377 pVCpu->em.s.enmPrevState = enmOldState;
2378 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2379 TMR3NotifySuspend(pVM, pVCpu);
2380 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2381
2382 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2383 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2384 if (rc != VINF_EM_SUSPEND)
2385 {
2386 if (RT_SUCCESS_NP(rc))
2387 {
2388 AssertLogRelMsgFailed(("%Rrc\n", rc));
2389 rc = VERR_EM_INTERNAL_ERROR;
2390 }
2391 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2392 }
2393 return rc;
2394
2395 /*
2396 * Guest debug events.
2397 */
2398 case VINF_EM_DBG_STEPPED:
2399 case VINF_EM_DBG_STOP:
2400 case VINF_EM_DBG_EVENT:
2401 case VINF_EM_DBG_BREAKPOINT:
2402 case VINF_EM_DBG_STEP:
2403 if (enmOldState == EMSTATE_HM)
2404 {
2405 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2406 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2407 }
2408 else if (enmOldState == EMSTATE_NEM)
2409 {
2410 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2411 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2412 }
2413 else if (enmOldState == EMSTATE_RECOMPILER)
2414 {
2415 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RECOMPILER));
2416 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RECOMPILER;
2417 }
2418 else
2419 {
2420#ifdef VBOX_VMM_TARGET_ARMV8
2421 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2422 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM; /** @todo No IEM yet and this gets selected if enmOldState == EMSTATE_HALTED. */
2423#else
2424 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2425 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2426#endif
2427 }
2428 break;
2429
2430 /*
2431 * Hypervisor debug events.
2432 */
2433 case VINF_EM_DBG_HYPER_STEPPED:
2434 case VINF_EM_DBG_HYPER_BREAKPOINT:
2435 case VINF_EM_DBG_HYPER_ASSERTION:
2436 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2437 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2438 break;
2439
2440 /*
2441 * Triple fault.
2442 */
2443 case VINF_EM_TRIPLE_FAULT:
2444 if (!pVM->em.s.fGuruOnTripleFault)
2445 {
2446 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2447 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2448 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2449 continue;
2450 }
2451 /* Else fall through and trigger a guru. */
2452 RT_FALL_THRU();
2453
2454 case VERR_VMM_RING0_ASSERTION:
2455 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2456 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2457 break;
2458
2459 /*
2460 * Any error code showing up here other than the ones we
2461 * know and process above are considered to be FATAL.
2462 *
2463 * Unknown warnings and informational status codes are also
2464 * included in this.
2465 */
2466 default:
2467 if (RT_SUCCESS_NP(rc))
2468 {
2469 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2470 rc = VERR_EM_INTERNAL_ERROR;
2471 }
2472 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2473 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2474 break;
2475 }
2476
2477 /*
2478 * Act on state transition.
2479 */
2480 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2481 if (enmOldState != enmNewState)
2482 {
2483 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2484
2485 /* Clear MWait flags and the unhalt FF. */
2486 if ( enmOldState == EMSTATE_HALTED
2487 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2488 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2489 && ( enmNewState == EMSTATE_HM
2490 || enmNewState == EMSTATE_NEM
2491 || enmNewState == EMSTATE_RECOMPILER
2492 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2493 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2494 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2495 || enmNewState == EMSTATE_DEBUG_GUEST_RECOMPILER) )
2496 {
2497 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2498 {
2499 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2500 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2501 }
2502 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2503 {
2504 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2505 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2506 }
2507 }
2508 }
2509 else
2510 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2511
2512 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2513 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2514
2515 /*
2516 * Act on the new state.
2517 */
2518 switch (enmNewState)
2519 {
2520 /*
2521 * Execute hardware accelerated raw.
2522 */
2523 case EMSTATE_HM:
2524#ifdef VBOX_WITH_HWVIRT
2525 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2526#else
2527 AssertReleaseFailedStmt(rc = VERR_EM_INTERNAL_ERROR); /* Should never get here. */
2528#endif
2529 break;
2530
2531 /*
2532                  * Execute using the native execution manager (NEM).
2533 */
2534 case EMSTATE_NEM:
2535 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2536 break;
2537
2538 /*
2539 * Execute recompiled.
2540 */
2541 case EMSTATE_RECOMPILER:
2542 rc = VBOXSTRICTRC_TODO(emR3RecompilerExecute(pVM, pVCpu, enmOldState == EMSTATE_HALTED, &fFFDone));
2543 Log2(("EMR3ExecuteVM: emR3RecompilerExecute -> %Rrc\n", rc));
2544 break;
2545
2546 /*
2547 * Execute in the interpreter.
2548 */
2549 case EMSTATE_IEM:
2550 {
2551#if 0 /* For comparing HM and IEM (@bugref{10464}). */
2552 PCPUMCTX const pCtx = &pVCpu->cpum.GstCtx;
2553 PCX86FXSTATE const pX87 = &pCtx->XState.x87;
2554 Log11(("eax=%08x ebx=%08x ecx=%08x edx=%08x esi=%08x edi=%08x\n"
2555 "eip=%08x esp=%08x ebp=%08x eflags=%08x\n"
2556 "cs=%04x ss=%04x ds=%04x es=%04x fs=%04x gs=%04x\n"
2557 "fsw=%04x fcw=%04x ftw=%02x top=%u%s%s%s%s%s%s%s%s%s\n"
2558 "st0=%.10Rhxs st1=%.10Rhxs st2=%.10Rhxs st3=%.10Rhxs\n"
2559 "st4=%.10Rhxs st5=%.10Rhxs st6=%.10Rhxs st7=%.10Rhxs\n",
2560                        pCtx->eax, pCtx->ebx, pCtx->ecx, pCtx->edx, pCtx->esi, pCtx->edi,
2561 pCtx->eip, pCtx->esp, pCtx->ebp, pCtx->eflags.u,
2562 pCtx->cs.Sel, pCtx->ss.Sel, pCtx->ds.Sel, pCtx->es.Sel, pCtx->fs.Sel, pCtx->gs.Sel,
2563 pX87->FSW, pX87->FCW, pX87->FTW, X86_FSW_TOP_GET(pX87->FSW),
2564 pX87->FSW & X86_FSW_ES ? " ES!" : "",
2565 pX87->FSW & X86_FSW_IE ? " IE" : "",
2566 pX87->FSW & X86_FSW_DE ? " DE" : "",
2567 pX87->FSW & X86_FSW_SF ? " SF" : "",
2568 pX87->FSW & X86_FSW_B ? " B!" : "",
2569 pX87->FSW & X86_FSW_C0 ? " C0" : "",
2570 pX87->FSW & X86_FSW_C1 ? " C1" : "",
2571 pX87->FSW & X86_FSW_C2 ? " C2" : "",
2572 pX87->FSW & X86_FSW_C3 ? " C3" : "",
2573 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(0)],
2574 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(1)],
2575 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(2)],
2576 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(3)],
2577 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(4)],
2578 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(5)],
2579 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(6)],
2580 &pX87->aRegs[/*X86_FSW_TOP_GET_ST(pVCpu->cpum.GstCtx.XState.x87.FSW,*/(7)]));
2581 DBGFR3DisasInstrCurrentLogInternal(pVCpu, NULL);
2582#endif
2583
2584 uint32_t cInstructions = 0;
2585#if 0 /* For testing purposes. */
2586 //STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2587 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2588 //STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2589 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_EXEC_ENGINE || rc == VINF_EM_RESCHEDULE_REM)
2590 rc = VINF_SUCCESS;
2591 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2592#endif
2593 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2594 if (pVM->em.s.fIemExecutesAll)
2595 {
2596 Assert(rc != VINF_EM_RESCHEDULE_REM);
2597 Assert(rc != VINF_EM_RESCHEDULE_EXEC_ENGINE);
2598#ifdef VBOX_HIGH_RES_TIMERS_HACK
2599 if (cInstructions < 2048)
2600 TMTimerPollVoid(pVM, pVCpu);
2601#endif
2602 }
2603 else if (rc == VINF_SUCCESS)
2604 rc = VINF_EM_RESCHEDULE; /* Need to check whether we can run in HM or NEM again. */
2605#ifdef VBOX_VMM_TARGET_X86
2606 if (rc != VINF_EM_EMULATE_SPLIT_LOCK)
2607 { /* likely */ }
2608 else
2609 rc = VBOXSTRICTRC_TODO(emR3ExecuteSplitLockInstruction(pVM, pVCpu));
2610#endif
2611 fFFDone = false;
2612 break;
2613 }
2614
2615 /*
2616 * Application processor execution halted until SIPI.
2617 */
2618 case EMSTATE_WAIT_SIPI:
2619 /* no break */
2620 /*
2621 * hlt - execution halted until interrupt.
2622 */
2623 case EMSTATE_HALTED:
2624 {
2625 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2626                 /* If HM (or someone else) stores a pending interrupt in
2627                    TRPM, it must be dispatched ASAP without any halting.
2628                    Anything pending in TRPM has been accepted and the CPU
2629                    should already be in the right state to receive it. */
2630 if (TRPMHasTrap(pVCpu))
2631 rc = VINF_EM_RESCHEDULE;
2632#ifdef VBOX_VMM_TARGET_X86
2633 /* MWAIT has a special extension where it's woken up when
2634 an interrupt is pending even when IF=0. */
2635 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2636 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2637 {
2638 rc = VMR3WaitHalted(pVM, pVCpu, 0 /*fFlags*/);
2639 if (rc == VINF_SUCCESS)
2640 {
2641 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2642 PDMApicUpdatePendingInterrupts(pVCpu);
2643
2644 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2645 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2646 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2647 {
2648 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2649 rc = VINF_EM_RESCHEDULE;
2650 }
2651
2652 }
2653 }
2654#endif
2655 else
2656 {
2657#ifdef VBOX_VMM_TARGET_ARMV8
2658 const uint32_t fWaitHalted = 0; /* WFI/WFE always return when an interrupt happens. */
2659#elif defined(VBOX_VMM_TARGET_X86)
2660 const uint32_t fWaitHalted = (CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF) ? 0 : VMWAITHALTED_F_IGNORE_IRQS;
2661#endif
2662 rc = VMR3WaitHalted(pVM, pVCpu, fWaitHalted);
2663
2664 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2665 check VMCPU_FF_UPDATE_APIC here. */
2666 if ( rc == VINF_SUCCESS
2667#ifdef VBOX_VMM_TARGET_ARMV8
2668 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI
2669 | VMCPU_FF_INTERRUPT_FIQ | VMCPU_FF_INTERRUPT_IRQ
2670 | VMCPU_FF_VTIMER_ACTIVATED)
2671#elif defined(VBOX_VMM_TARGET_X86)
2672 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT)
2673#else
2674# error "port me"
2675#endif
2676 )
2677 {
2678 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2679 rc = VINF_EM_RESCHEDULE;
2680 }
2681 }
2682
2683 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2684 break;
2685 }
2686
2687 /*
2688 * Suspended - return to VM.cpp.
2689 */
2690 case EMSTATE_SUSPENDED:
2691 TMR3NotifySuspend(pVM, pVCpu);
2692 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2693 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2694 return VINF_EM_SUSPEND;
2695
2696 /*
2697 * Debugging in the guest.
2698 */
2699 case EMSTATE_DEBUG_GUEST_RAW:
2700 case EMSTATE_DEBUG_GUEST_HM:
2701 case EMSTATE_DEBUG_GUEST_NEM:
2702 case EMSTATE_DEBUG_GUEST_IEM:
2703 case EMSTATE_DEBUG_GUEST_RECOMPILER:
2704 TMR3NotifySuspend(pVM, pVCpu);
2705 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2706 TMR3NotifyResume(pVM, pVCpu);
2707 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2708 break;
2709
2710 /*
2711 * Debugging in the hypervisor.
2712 */
2713 case EMSTATE_DEBUG_HYPER:
2714 {
2715 TMR3NotifySuspend(pVM, pVCpu);
2716 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2717
2718 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2719 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2720 if (rc != VINF_SUCCESS)
2721 {
2722 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2723 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2724 else
2725 {
2726 /* switch to guru meditation mode */
2727 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2728 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2729 VMMR3FatalDump(pVM, pVCpu, rc);
2730 }
2731 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2732 return rc;
2733 }
2734
2735 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2736 TMR3NotifyResume(pVM, pVCpu);
2737 break;
2738 }
2739
2740 /*
2741 * Guru meditation takes place in the debugger.
2742 */
2743 case EMSTATE_GURU_MEDITATION:
2744 {
2745 TMR3NotifySuspend(pVM, pVCpu);
2746 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2747 VMMR3FatalDump(pVM, pVCpu, rc);
2748 emR3Debug(pVM, pVCpu, rc);
2749 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2750 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2751 return rc;
2752 }
2753
2754 /*
2755 * The states we don't expect here.
2756 */
2757 case EMSTATE_NONE:
2758 case EMSTATE_RAW_OBSOLETE:
2759 case EMSTATE_IEM_THEN_REM_OBSOLETE:
2760 case EMSTATE_TERMINATING:
2761 default:
2762 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2763 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2764 TMR3NotifySuspend(pVM, pVCpu);
2765 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2766 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2767 return VERR_EM_INTERNAL_ERROR;
2768 }
2769 } /* The Outer Main Loop */
2770 }
2771 else
2772 {
2773 /*
2774 * Fatal error.
2775 */
2776 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2777 TMR3NotifySuspend(pVM, pVCpu);
2778 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2779 VMMR3FatalDump(pVM, pVCpu, rc);
2780 emR3Debug(pVM, pVCpu, rc);
2781 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2782 /** @todo change the VM state! */
2783 return rc;
2784 }
2785
2786 /* not reached */
2787}
2788