source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@98970

Last change on this file since 98970 was 98103, checked in by vboxsync, 2 years ago: Copyright year updates by scm.

1/* $Id: EM.cpp 98103 2023-01-17 14:15:46Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2023 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * SPDX-License-Identifier: GPL-3.0-only
26 */
27
28/** @page pg_em EM - The Execution Monitor / Manager
29 *
30 * The Execution Monitor/Manager is responsible for running the VM, scheduling
31 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
32 * Interpreted), and keeping the CPU states in sync. The function
33 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
34 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
35 * emR3RemExecute).
36 *
37 * The interpreted execution is only used to avoid switching between
38 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
39 * The interpretation is thus implemented as part of EM.
40 *
41 * @see grp_em
42 */
43
44
45/*********************************************************************************************************************************
46* Header Files *
47*********************************************************************************************************************************/
48#define LOG_GROUP LOG_GROUP_EM
49#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET & interrupt injection */
50#include <VBox/vmm/em.h>
51#include <VBox/vmm/vmm.h>
52#include <VBox/vmm/selm.h>
53#include <VBox/vmm/trpm.h>
54#include <VBox/vmm/iem.h>
55#include <VBox/vmm/nem.h>
56#include <VBox/vmm/iom.h>
57#include <VBox/vmm/dbgf.h>
58#include <VBox/vmm/pgm.h>
59#include <VBox/vmm/apic.h>
60#include <VBox/vmm/tm.h>
61#include <VBox/vmm/mm.h>
62#include <VBox/vmm/ssm.h>
63#include <VBox/vmm/pdmapi.h>
64#include <VBox/vmm/pdmcritsect.h>
65#include <VBox/vmm/pdmqueue.h>
66#include <VBox/vmm/hm.h>
67#include "EMInternal.h"
68#include <VBox/vmm/vm.h>
69#include <VBox/vmm/uvm.h>
70#include <VBox/vmm/cpumdis.h>
71#include <VBox/dis.h>
72#include <VBox/disopcode.h>
73#include <VBox/err.h>
74#include "VMMTracing.h"
75
76#include <iprt/asm.h>
77#include <iprt/string.h>
78#include <iprt/stream.h>
79#include <iprt/thread.h>
80
81
82/*********************************************************************************************************************************
83* Internal Functions *
84*********************************************************************************************************************************/
85static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
86static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
87#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
88static const char *emR3GetStateName(EMSTATE enmState);
89#endif
90static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
91#if defined(VBOX_WITH_REM) || defined(DEBUG)
92static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
93#endif
94static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
95
96
97/**
98 * Initializes the EM.
99 *
100 * @returns VBox status code.
101 * @param pVM The cross context VM structure.
102 */
103VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
104{
105 LogFlow(("EMR3Init\n"));
106 /*
107 * Assert alignment and sizes.
108 */
109 AssertCompileMemberAlignment(VM, em.s, 32);
110 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
111 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
112 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
113
114 /*
115 * Init the structure.
116 */
117 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
118 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
119
120 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll,
121#if defined(RT_ARCH_ARM64) && defined(RT_OS_DARWIN)
122 true
123#else
124 false
125#endif
126 );
127 AssertLogRelRCReturn(rc, rc);
128
129 bool fEnabled;
130 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
131 AssertLogRelRCReturn(rc, rc);
132 pVM->em.s.fGuruOnTripleFault = !fEnabled;
133 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
134 {
135 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
136 pVM->em.s.fGuruOnTripleFault = true;
137 }
138
139 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
140
141 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
142 * Whether to try correlate exit history in any context, detect hot spots and
143 * try optimize these using IEM if there are other exits close by. This
144 * overrides the context specific settings. */
145 bool fExitOptimizationEnabled = true;
146 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
147 AssertLogRelRCReturn(rc, rc);
148
149 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
150 * Whether to optimize exits in ring-0. Setting this to false will also disable
151 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
152 * capabilities of the host kernel, this optimization may be unavailable. */
153 bool fExitOptimizationEnabledR0 = true;
154 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
155 AssertLogRelRCReturn(rc, rc);
156 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
157
158 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
159 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
160 * hooks are in effect). */
161 /** @todo change the default to true here */
162 bool fExitOptimizationEnabledR0PreemptDisabled = true;
163 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
164 AssertLogRelRCReturn(rc, rc);
165 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
166
167 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
168 * Maximum number of instructions to let EMHistoryExec execute in one go. */
169 uint16_t cHistoryExecMaxInstructions = 8192;
170 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
171 AssertLogRelRCReturn(rc, rc);
172 if (cHistoryExecMaxInstructions < 16)
173 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
174
175 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
176 * Maximum number of instructions between exits during probing. */
177 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
178#ifdef RT_OS_WINDOWS
179 if (VM_IS_NEM_ENABLED(pVM))
180 cHistoryProbeMaxInstructionsWithoutExit = 32;
181#endif
182 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
183 cHistoryProbeMaxInstructionsWithoutExit);
184 AssertLogRelRCReturn(rc, rc);
185 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
186 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
187 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 16");
188
189 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
190 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
191 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
192 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
193 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
194 cHistoryProbeMinInstructions);
195 AssertLogRelRCReturn(rc, rc);
196
197 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
198 {
199 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
200 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
201 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
202 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
203 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
204 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
205 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
206 }
207
208 /*
209 * Saved state.
210 */
211 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
212 NULL, NULL, NULL,
213 NULL, emR3Save, NULL,
214 NULL, emR3Load, NULL);
215 if (RT_FAILURE(rc))
216 return rc;
217
218 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
219 {
220 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
221
222 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
223 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
224 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
225 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
226
227# define EM_REG_COUNTER(a, b, c) \
228 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
229 AssertRC(rc);
230
231# define EM_REG_COUNTER_USED(a, b, c) \
232 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
233 AssertRC(rc);
234
235# define EM_REG_PROFILE(a, b, c) \
236 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
237 AssertRC(rc);
238
239# define EM_REG_PROFILE_ADV(a, b, c) \
240 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
241 AssertRC(rc);
242
243 /*
244 * Statistics.
245 */
246#ifdef VBOX_WITH_STATISTICS
247 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
248 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions end to IEM in ring-3.");
249
250 /* these should be considered for release statistics. */
251 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
252 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
253 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
254#endif
255 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
256 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times enmR3HMExecute is called.");
257#ifdef VBOX_WITH_STATISTICS
258 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
259 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
260 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
261#endif
262 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
263 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times enmR3NEMExecute is called.");
264#ifdef VBOX_WITH_STATISTICS
265 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%u/EM/REMEmuSingle", "Profiling single instruction REM execution.");
266 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
267 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%u/EM/REMSync", "Profiling REM context syncing.");
268 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%u/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
269 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%u/EM/RAWExec", "Profiling Raw Mode execution.");
270 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%u/EM/RAWTail", "Profiling Raw Mode tail overhead.");
271#endif /* VBOX_WITH_STATISTICS */
272
273 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
274 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
275 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
276 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
277 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%u/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
278
279 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
280
281 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
282 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
283 AssertRC(rc);
284
285 /* History record statistics */
286 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
287 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
288 AssertRC(rc);
289
290 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
291 {
292 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
293 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
294 AssertRC(rc);
295 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
296 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
297 AssertRC(rc);
298 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
299 "Number of replacments at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", idCpu, iStep);
300 AssertRC(rc);
301 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
302 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
303 AssertRC(rc);
304 }
305
306 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
307 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
308 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
309 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
310 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
311 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
312 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
313 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
314 }
315
316 emR3InitDbg(pVM);
317 return VINF_SUCCESS;
318}
319
320
321/**
322 * Called when a VM initialization stage is completed.
323 *
324 * @returns VBox status code.
325 * @param pVM The cross context VM structure.
326 * @param enmWhat The initialization state that was completed.
327 */
328VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
329{
330 if (enmWhat == VMINITCOMPLETED_RING0)
331 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
332 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
333 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
334 return VINF_SUCCESS;
335}
336
337
338/**
339 * Applies relocations to data and code managed by this
340 * component. This function will be called at init and
341 * whenever the VMM needs to relocate itself inside the GC.
342 *
343 * @param pVM The cross context VM structure.
344 */
345VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
346{
347 LogFlow(("EMR3Relocate\n"));
348 RT_NOREF(pVM);
349}
350
351
352/**
353 * Reset the EM state for a CPU.
354 *
355 * Called by EMR3Reset and hot plugging.
356 *
357 * @param pVCpu The cross context virtual CPU structure.
358 */
359VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
360{
361 /* Reset scheduling state. */
362 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
363
364 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
365 out of the HALTED state here so that enmPrevState doesn't end up as
366 HALTED when EMR3Execute returns. */
367 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
368 {
369 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
370 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
371 }
372}
373
374
375/**
376 * Reset notification.
377 *
378 * @param pVM The cross context VM structure.
379 */
380VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
381{
382 Log(("EMR3Reset: \n"));
383 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
384 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
385}
386
387
388/**
389 * Terminates the EM.
390 *
391 * Termination means cleaning up and freeing all resources;
392 * the VM itself is at this point powered off or suspended.
393 *
394 * @returns VBox status code.
395 * @param pVM The cross context VM structure.
396 */
397VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
398{
399 RT_NOREF(pVM);
400 return VINF_SUCCESS;
401}
402
403
404/**
405 * Execute state save operation.
406 *
407 * @returns VBox status code.
408 * @param pVM The cross context VM structure.
409 * @param pSSM SSM operation handle.
410 */
411static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
412{
413 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
414 {
415 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
416
417 SSMR3PutBool(pSSM, false /*fForceRAW*/);
418
419 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
420 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
421 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
422
423 /* Save mwait state. */
424 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
425 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
426 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
427 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
428 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
429 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
430 AssertRCReturn(rc, rc);
431 }
432 return VINF_SUCCESS;
433}
434
435
436/**
437 * Execute state load operation.
438 *
439 * @returns VBox status code.
440 * @param pVM The cross context VM structure.
441 * @param pSSM SSM operation handle.
442 * @param uVersion Data layout version.
443 * @param uPass The data pass.
444 */
445static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
446{
447 /*
448 * Validate version.
449 */
450 if ( uVersion > EM_SAVED_STATE_VERSION
451 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
452 {
453 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
454 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
455 }
456 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
457
458 /*
459 * Load the saved state.
460 */
461 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
462 {
463 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
464
465 bool fForceRAWIgnored;
466 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
467 AssertRCReturn(rc, rc);
468
469 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
470 {
471 SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
472 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
473
474 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
475 }
476 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
477 {
478 /* Load mwait state. */
479 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
480 AssertRCReturn(rc, rc);
481 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
482 AssertRCReturn(rc, rc);
483 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
484 AssertRCReturn(rc, rc);
485 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
486 AssertRCReturn(rc, rc);
487 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
488 AssertRCReturn(rc, rc);
489 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
490 AssertRCReturn(rc, rc);
491 }
492 }
493 return VINF_SUCCESS;
494}
495
496
497/**
498 * Argument packet for emR3SetExecutionPolicy.
499 */
500struct EMR3SETEXECPOLICYARGS
501{
502 EMEXECPOLICY enmPolicy;
503 bool fEnforce;
504};
505
506
507/**
508 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
509 */
510static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
511{
512 /*
513 * Only the first CPU changes the variables.
514 */
515 if (pVCpu->idCpu == 0)
516 {
517 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
518 switch (pArgs->enmPolicy)
519 {
520 case EMEXECPOLICY_RECOMPILE_RING0:
521 case EMEXECPOLICY_RECOMPILE_RING3:
522 break;
523 case EMEXECPOLICY_IEM_ALL:
524 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
525
526 /* For making '.alliem 1' useful during debugging, transition the
527 EMSTATE_DEBUG_GUEST_XXX to EMSTATE_DEBUG_GUEST_IEM. */
528 for (VMCPUID i = 0; i < pVM->cCpus; i++)
529 {
530 PVMCPU pVCpuX = pVM->apCpusR3[i];
531 switch (pVCpuX->em.s.enmState)
532 {
533 case EMSTATE_DEBUG_GUEST_RAW:
534 case EMSTATE_DEBUG_GUEST_HM:
535 case EMSTATE_DEBUG_GUEST_NEM:
536 case EMSTATE_DEBUG_GUEST_REM:
537 Log(("EM: idCpu=%u: %s -> EMSTATE_DEBUG_GUEST_IEM\n", i, emR3GetStateName(pVCpuX->em.s.enmState) ));
538 pVCpuX->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
539 break;
540 case EMSTATE_DEBUG_GUEST_IEM:
541 default:
542 break;
543 }
544 }
545 break;
546 default:
547 AssertFailedReturn(VERR_INVALID_PARAMETER);
548 }
549 Log(("EM: Set execution policy (fIemExecutesAll=%RTbool)\n", pVM->em.s.fIemExecutesAll));
550 }
551
552 /*
553 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
554 */
555 return pVCpu->em.s.enmState == EMSTATE_RAW
556 || pVCpu->em.s.enmState == EMSTATE_HM
557 || pVCpu->em.s.enmState == EMSTATE_NEM
558 || pVCpu->em.s.enmState == EMSTATE_IEM
559 || pVCpu->em.s.enmState == EMSTATE_REM
560 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
561 ? VINF_EM_RESCHEDULE
562 : VINF_SUCCESS;
563}
564
565
566/**
567 * Changes an execution scheduling policy parameter.
568 *
569 * This is used to enable or disable raw-mode / hardware-virtualization
570 * execution of user and supervisor code.
571 *
572 * @returns VINF_SUCCESS on success.
573 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
574 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
575 *
576 * @param pUVM The user mode VM handle.
577 * @param enmPolicy The scheduling policy to change.
578 * @param fEnforce Whether to enforce the policy or not.
579 */
580VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
581{
582 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
583 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
584 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
585
586 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
587 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
588}
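
/* Illustrative usage sketch (not lifted from an actual caller): a debugger command such
 * as '.alliem 1' boils down to roughly
 *      int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *      AssertRC(rc);
 * where a VINF_EM_RESCHEDULE result merely means the EMTs will pick a new execution
 * mode on their next trip through the outer loop. */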
589
590
591/**
592 * Queries an execution scheduling policy parameter.
593 *
594 * @returns VBox status code
595 * @param pUVM The user mode VM handle.
596 * @param enmPolicy The scheduling policy to query.
597 * @param pfEnforced Where to return the current value.
598 */
599VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
600{
601 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
602 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
603 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
604 PVM pVM = pUVM->pVM;
605 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
606
607 /* No need to bother EMTs with a query. */
608 switch (enmPolicy)
609 {
610 case EMEXECPOLICY_RECOMPILE_RING0:
611 case EMEXECPOLICY_RECOMPILE_RING3:
612 *pfEnforced = false;
613 break;
614 case EMEXECPOLICY_IEM_ALL:
615 *pfEnforced = pVM->em.s.fIemExecutesAll;
616 break;
617 default:
618 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
619 }
620
621 return VINF_SUCCESS;
622}
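
/* Illustrative counterpart for reading a policy back (no EMT rendezvous involved):
 *      bool fIemAll = false;
 *      int rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fIemAll);
 */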
623
624
625/**
626 * Queries the main execution engine of the VM.
627 *
628 * @returns VBox status code
629 * @param pUVM The user mode VM handle.
630 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
631 */
632VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
633{
634 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
635 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
636
637 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
638 PVM pVM = pUVM->pVM;
639 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
640
641 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
642 return VINF_SUCCESS;
643}
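
/* Illustrative only: a front-end status query could use the above along the lines of
 *      uint8_t bEngine = VM_EXEC_ENGINE_NOT_SET;
 *      if (RT_SUCCESS(EMR3QueryMainExecutionEngine(pUVM, &bEngine)))
 *          LogRel(("Main execution engine: %u\n", bEngine));
 * where bEngine is one of the VM_EXEC_ENGINE_XXX defines (assumed to live in VBox/vmm/vm.h). */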
644
645
646/**
647 * Raise a fatal error.
648 *
649 * Safely terminate the VM with full state report and stuff. This function
650 * will naturally never return.
651 *
652 * @param pVCpu The cross context virtual CPU structure.
653 * @param rc VBox status code.
654 */
655VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
656{
657 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
658 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
659}
660
661
662#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
663/**
664 * Gets the EM state name.
665 *
666 * @returns Pointer to read-only state name.
667 * @param enmState The state.
668 */
669static const char *emR3GetStateName(EMSTATE enmState)
670{
671 switch (enmState)
672 {
673 case EMSTATE_NONE: return "EMSTATE_NONE";
674 case EMSTATE_RAW: return "EMSTATE_RAW";
675 case EMSTATE_HM: return "EMSTATE_HM";
676 case EMSTATE_IEM: return "EMSTATE_IEM";
677 case EMSTATE_REM: return "EMSTATE_REM";
678 case EMSTATE_HALTED: return "EMSTATE_HALTED";
679 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
680 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
681 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
682 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
683 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
684 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
685 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
686 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
687 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
688 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
689 case EMSTATE_NEM: return "EMSTATE_NEM";
690 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
691 default: return "Unknown!";
692 }
693}
694#endif /* LOG_ENABLED || VBOX_STRICT */
695
696
697/**
698 * Handle pending ring-3 I/O port write.
699 *
700 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
701 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
702 *
703 * @returns Strict VBox status code.
704 * @param pVM The cross context VM structure.
705 * @param pVCpu The cross context virtual CPU structure.
706 */
707VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
708{
709 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
710
711 /* Get and clear the pending data. */
712 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
713 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
714 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
715 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
716 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
717
718 /* Assert sanity. */
719 switch (cbValue)
720 {
721 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
722 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
723 case 4: break;
724 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
725 }
726 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
727
728 /* Do the work.*/
729 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
730 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
731 if (IOM_SUCCESS(rcStrict))
732 {
733 pVCpu->cpum.GstCtx.rip += cbInstr;
734 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
735 }
736 return rcStrict;
737}
738
739
740/**
741 * Handle pending ring-3 I/O port read.
742 *
743 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
744 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
745 *
746 * @returns Strict VBox status code.
747 * @param pVM The cross context VM structure.
748 * @param pVCpu The cross context virtual CPU structure.
749 */
750VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
751{
752 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
753
754 /* Get and clear the pending data. */
755 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
756 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
757 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
758 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
759
760 /* Assert sanity. */
761 switch (cbValue)
762 {
763 case 1: break;
764 case 2: break;
765 case 4: break;
766 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
767 }
768 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
769 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
770
771 /* Do the work.*/
772 uint32_t uValue = 0;
773 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
774 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
775 if (IOM_SUCCESS(rcStrict))
776 {
777 if (cbValue == 4)
778 pVCpu->cpum.GstCtx.rax = uValue;
779 else if (cbValue == 2)
780 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
781 else
782 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
783 pVCpu->cpum.GstCtx.rip += cbInstr;
784 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
785 }
786 return rcStrict;
787}
788
789
790/**
791 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
792 * Worker for emR3ExecuteSplitLockInstruction}
793 */
794static DECLCALLBACK(VBOXSTRICTRC) emR3ExecuteSplitLockInstructionRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
795{
796 /* Only execute on the specified EMT. */
797 if (pVCpu == (PVMCPU)pvUser)
798 {
799 LogFunc(("\n"));
800 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
801 LogFunc(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
802 if (rcStrict == VINF_IEM_RAISED_XCPT)
803 rcStrict = VINF_SUCCESS;
804 return rcStrict;
805 }
806 RT_NOREF(pVM);
807 return VINF_SUCCESS;
808}
809
810
811/**
812 * Handle an instruction causing a split cacheline lock access in SMP VMs.
813 *
814 * Generally we only get here if the host has split-lock detection enabled and
815 * this caused an \#AC because of something the guest did. If we interpret the
816 * instruction as-is, we'll likely just repeat the split-lock access and
817 * possibly be killed, get a SIGBUS, or trigger a warning followed by extra MSR
818 * changes on context switching (costs a tiny bit). Assuming these \#ACs are
819 * rare to non-existing, we'll do a rendezvous of all EMTs and tell IEM to
820 * disregard the lock prefix when emulating the instruction.
821 *
822 * Yes, we could probably modify the MSR (or MSRs) controlling the detection
823 * feature when entering guest context, but the support for the feature isn't a
824 * 100% given and we'll need the debug-only supdrvOSMsrProberRead and
825 * supdrvOSMsrProberWrite functionality from SUPDrv.cpp to safely detect it.
826 * Thus the approach is to just deal with the spurious \#ACs first and maybe add
827 * proper detection to SUPDrv later if we find it necessary.
828 *
829 * @see @bugref{10052}
830 *
831 * @returns Strict VBox status code.
832 * @param pVM The cross context VM structure.
833 * @param pVCpu The cross context virtual CPU structure.
834 */
835VBOXSTRICTRC emR3ExecuteSplitLockInstruction(PVM pVM, PVMCPU pVCpu)
836{
837 LogFunc(("\n"));
838 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
839}
840
841
842/**
843 * Debug loop.
844 *
845 * @returns VBox status code for EM.
846 * @param pVM The cross context VM structure.
847 * @param pVCpu The cross context virtual CPU structure.
848 * @param rc Current EM VBox status code.
849 */
850static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
851{
852 for (;;)
853 {
854 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
855 const VBOXSTRICTRC rcLast = rc;
856
857 /*
858 * Debug related RC.
859 */
860 switch (VBOXSTRICTRC_VAL(rc))
861 {
862 /*
863 * Single step an instruction.
864 */
865 case VINF_EM_DBG_STEP:
866 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
867 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
868 AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
869 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
870 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
871 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
872 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
873#ifdef VBOX_WITH_REM /** @todo fix me? */
874 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
875 rc = emR3RemStep(pVM, pVCpu);
876#endif
877 else
878 {
879 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
880 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
881 rc = VINF_EM_DBG_STEPPED;
882 }
883 break;
884
885 /*
886 * Simple events: stepped, breakpoint, stop/assertion.
887 */
888 case VINF_EM_DBG_STEPPED:
889 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
890 break;
891
892 case VINF_EM_DBG_BREAKPOINT:
893 rc = DBGFR3BpHit(pVM, pVCpu);
894 break;
895
896 case VINF_EM_DBG_STOP:
897 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
898 break;
899
900 case VINF_EM_DBG_EVENT:
901 rc = DBGFR3EventHandlePending(pVM, pVCpu);
902 break;
903
904 case VINF_EM_DBG_HYPER_STEPPED:
905 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
906 break;
907
908 case VINF_EM_DBG_HYPER_BREAKPOINT:
909 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
910 break;
911
912 case VINF_EM_DBG_HYPER_ASSERTION:
913 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
914 RTLogFlush(NULL);
915 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
916 break;
917
918 /*
919 * Guru meditation.
920 */
921 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
922 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
923 break;
924 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
925 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
926 break;
927 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
928 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
929 break;
930
931 default: /** @todo don't use default for guru, but make special errors code! */
932 {
933 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
934 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
935 break;
936 }
937 }
938
939 /*
940 * Process the result.
941 */
942 switch (VBOXSTRICTRC_VAL(rc))
943 {
944 /*
945 * Continue the debugging loop.
946 */
947 case VINF_EM_DBG_STEP:
948 case VINF_EM_DBG_STOP:
949 case VINF_EM_DBG_EVENT:
950 case VINF_EM_DBG_STEPPED:
951 case VINF_EM_DBG_BREAKPOINT:
952 case VINF_EM_DBG_HYPER_STEPPED:
953 case VINF_EM_DBG_HYPER_BREAKPOINT:
954 case VINF_EM_DBG_HYPER_ASSERTION:
955 break;
956
957 /*
958 * Resuming execution (in some form) has to be done here if we got
959 * a hypervisor debug event.
960 */
961 case VINF_SUCCESS:
962 case VINF_EM_RESUME:
963 case VINF_EM_SUSPEND:
964 case VINF_EM_RESCHEDULE:
965 case VINF_EM_RESCHEDULE_RAW:
966 case VINF_EM_RESCHEDULE_REM:
967 case VINF_EM_HALT:
968 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
969 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
970 if (rc == VINF_SUCCESS)
971 rc = VINF_EM_RESCHEDULE;
972 return rc;
973
974 /*
975 * The debugger isn't attached.
976 * We'll simply turn the thing off since that's the easiest thing to do.
977 */
978 case VERR_DBGF_NOT_ATTACHED:
979 switch (VBOXSTRICTRC_VAL(rcLast))
980 {
981 case VINF_EM_DBG_HYPER_STEPPED:
982 case VINF_EM_DBG_HYPER_BREAKPOINT:
983 case VINF_EM_DBG_HYPER_ASSERTION:
984 case VERR_TRPM_PANIC:
985 case VERR_TRPM_DONT_PANIC:
986 case VERR_VMM_RING0_ASSERTION:
987 case VERR_VMM_HYPER_CR3_MISMATCH:
988 case VERR_VMM_RING3_CALL_DISABLED:
989 return rcLast;
990 }
991 return VINF_EM_OFF;
992
993 /*
994 * Status codes terminating the VM in one or another sense.
995 */
996 case VINF_EM_TERMINATE:
997 case VINF_EM_OFF:
998 case VINF_EM_RESET:
999 case VINF_EM_NO_MEMORY:
1000 case VINF_EM_RAW_STALE_SELECTOR:
1001 case VINF_EM_RAW_IRET_TRAP:
1002 case VERR_TRPM_PANIC:
1003 case VERR_TRPM_DONT_PANIC:
1004 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
1005 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
1006 case VERR_VMM_RING0_ASSERTION:
1007 case VERR_VMM_HYPER_CR3_MISMATCH:
1008 case VERR_VMM_RING3_CALL_DISABLED:
1009 case VERR_INTERNAL_ERROR:
1010 case VERR_INTERNAL_ERROR_2:
1011 case VERR_INTERNAL_ERROR_3:
1012 case VERR_INTERNAL_ERROR_4:
1013 case VERR_INTERNAL_ERROR_5:
1014 case VERR_IPE_UNEXPECTED_STATUS:
1015 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1016 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1017 return rc;
1018
1019 /*
1020 * The rest is unexpected, and will keep us here.
1021 */
1022 default:
1023 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1024 break;
1025 }
1026 } /* debug for ever */
1027}
1028
1029
1030#if defined(VBOX_WITH_REM) || defined(DEBUG)
1031/**
1032 * Steps recompiled code.
1033 *
1034 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1035 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1036 *
1037 * @param pVM The cross context VM structure.
1038 * @param pVCpu The cross context virtual CPU structure.
1039 */
1040static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1041{
1042 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1043
1044 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1045
1046 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1047 return rc;
1048}
1049#endif /* VBOX_WITH_REM || DEBUG */
1050
1051
1052/**
1053 * Executes recompiled code.
1054 *
1055 * This function contains the recompiler version of the inner
1056 * execution loop (the outer loop being in EMR3ExecuteVM()).
1057 *
1058 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1059 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1060 *
1061 * @param pVM The cross context VM structure.
1062 * @param pVCpu The cross context virtual CPU structure.
1063 * @param pfFFDone Where to store an indicator telling whether or not
1064 * FFs were done before returning.
1065 *
1066 */
1067static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1068{
1069#ifdef LOG_ENABLED
1070 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1071
1072 if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1073 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1074 else
1075 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1076#endif
1077 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1078
1079 /*
1080 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1081 * or the REM suggests raw-mode execution.
1082 */
1083 *pfFFDone = false;
1084 uint32_t cLoops = 0;
1085 int rc = VINF_SUCCESS;
1086 for (;;)
1087 {
1088 /*
1089 * Execute REM.
1090 */
1091 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1092 {
1093 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1094 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/));
1095 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1096 }
1097 else
1098 {
1099 /* Give up this time slice; virtual time continues */
1100 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1101 RTThreadSleep(5);
1102 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1103 rc = VINF_SUCCESS;
1104 }
1105
1106 /*
1107 * Deal with high priority post execution FFs before doing anything
1108 * else. Sync back the state and leave the lock to be on the safe side.
1109 */
1110 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1111 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1112 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1113
1114 /*
1115 * Process the returned status code.
1116 */
1117 if (rc != VINF_SUCCESS)
1118 {
1119 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1120 break;
1121 if (rc != VINF_REM_INTERRUPED_FF)
1122 {
1123 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1124 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1125 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1126 {
1127 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1128 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1129 {
1130 rc = VINF_EM_RESCHEDULE;
1131 break;
1132 }
1133 }
1134
1135 /*
1136 * Anything which is not known to us means an internal error
1137 * and the termination of the VM!
1138 */
1139 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1140 break;
1141 }
1142 }
1143
1144
1145 /*
1146 * Check and execute forced actions.
1147 *
1148 * Sync back the VM state and leave the lock before calling any of
1149 * these, you never know what's going to happen here.
1150 */
1151#ifdef VBOX_HIGH_RES_TIMERS_HACK
1152 TMTimerPollVoid(pVM, pVCpu);
1153#endif
1154 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1155 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1156 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1157 {
1158 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1159 rc = emR3ForcedActions(pVM, pVCpu, rc);
1160 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1161 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1162 if ( rc != VINF_SUCCESS
1163 && rc != VINF_EM_RESCHEDULE_REM)
1164 {
1165 *pfFFDone = true;
1166 break;
1167 }
1168 }
1169
1170 /*
1171 * Have to check if we can get back to fast execution mode every so often.
1172 */
1173 if (!(++cLoops & 7))
1174 {
1175 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
1176 if ( enmCheck != EMSTATE_REM
1177 && enmCheck != EMSTATE_IEM_THEN_REM)
1178 {
1179 LogFlow(("emR3RemExecute: emR3Reschedule -> %d -> VINF_EM_RESCHEDULE\n", enmCheck));
1180 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1181 return VINF_EM_RESCHEDULE;
1182 }
1183 Log2(("emR3RemExecute: emR3Reschedule -> %d\n", enmCheck));
1184 }
1185
1186 } /* The Inner Loop, recompiled execution mode version. */
1187
1188 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1189 return rc;
1190}
1191
1192
1193#ifdef DEBUG
1194
1195int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1196{
1197 EMSTATE enmOldState = pVCpu->em.s.enmState;
1198
1199 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1200
1201 Log(("Single step BEGIN:\n"));
1202 for (uint32_t i = 0; i < cIterations; i++)
1203 {
1204 DBGFR3PrgStep(pVCpu);
1205 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1206 emR3RemStep(pVM, pVCpu);
1207 if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
1208 break;
1209 }
1210 Log(("Single step END:\n"));
1211 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1212 pVCpu->em.s.enmState = enmOldState;
1213 return VINF_EM_RESCHEDULE;
1214}
1215
1216#endif /* DEBUG */
1217
1218
1219/**
1220 * Try to execute the problematic code in IEM first, then fall back on REM if there
1221 * is too much of it or if IEM doesn't implement something.
1222 *
1223 * @returns Strict VBox status code from IEMExecLots.
1224 * @param pVM The cross context VM structure.
1225 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1226 * @param pfFFDone Force flags done indicator.
1227 *
1228 * @thread EMT(pVCpu)
1229 */
1230static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1231{
1232 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1233 *pfFFDone = false;
1234
1235 /*
1236 * Execute in IEM for a while.
1237 */
1238 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1239 {
1240 uint32_t cInstructions;
1241 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 1024 - pVCpu->em.s.cIemThenRemInstructions /*cMaxInstructions*/,
1242 UINT32_MAX/2 /*cPollRate*/, &cInstructions);
1243 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1244 if (rcStrict != VINF_SUCCESS)
1245 {
1246 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1247 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1248 break;
1249
1250 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1251 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1252 return rcStrict;
1253 }
1254
1255 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1256 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1257 {
1258 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1259 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1260 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1261 pVCpu->em.s.enmState = enmNewState;
1262 return VINF_SUCCESS;
1263 }
1264
1265 /*
1266 * Check for pending actions.
1267 */
1268 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1269 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1270 return VINF_SUCCESS;
1271 }
1272
1273 /*
1274 * Switch to REM.
1275 */
1276 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1277 pVCpu->em.s.enmState = EMSTATE_REM;
1278 return VINF_SUCCESS;
1279}
1280
1281
1282/**
1283 * Decides whether to execute RAW, HWACC or REM.
1284 *
1285 * @returns new EM state
1286 * @param pVM The cross context VM structure.
1287 * @param pVCpu The cross context virtual CPU structure.
1288 */
1289EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1290{
1291 /*
1292 * We stay in the wait for SIPI state unless explicitly told otherwise.
1293 */
1294 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1295 return EMSTATE_WAIT_SIPI;
1296
1297 /*
1298 * Execute everything in IEM?
1299 */
1300 if ( pVM->em.s.fIemExecutesAll
1301 || VM_IS_EXEC_ENGINE_IEM(pVM))
1302 return EMSTATE_IEM;
1303
1304 if (VM_IS_HM_ENABLED(pVM))
1305 {
1306 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1307 return EMSTATE_HM;
1308 }
1309 else if (NEMR3CanExecuteGuest(pVM, pVCpu))
1310 return EMSTATE_NEM;
1311
1312 /*
1313 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1314 * turns off monitoring features essential for raw mode!
1315 */
1316 return EMSTATE_IEM_THEN_REM;
1317}
1318
1319
1320/**
1321 * Executes all high priority post execution force actions.
1322 *
1323 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1324 * fatal error status code.
1325 *
1326 * @param pVM The cross context VM structure.
1327 * @param pVCpu The cross context virtual CPU structure.
1328 * @param rc The current strict VBox status code rc.
1329 */
1330VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1331{
1332 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1333
1334 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1335 PDMCritSectBothFF(pVM, pVCpu);
1336
1337 /* Update CR3 (Nested Paging case for HM). */
1338 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1339 {
1340 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1341 int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1342 if (RT_FAILURE(rc2))
1343 return rc2;
1344 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1345 }
1346
1347 /* IEM has pending work (typically memory write after INS instruction). */
1348 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1349 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1350
1351 /* IOM has pending work (committing an I/O or MMIO write). */
1352 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1353 {
1354 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1355 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1356 { /* half likely, or at least it's a line shorter. */ }
1357 else if (rc == VINF_SUCCESS)
1358 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1359 else
1360 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1361 }
1362
1363 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1364 {
1365 if ( rc > VINF_EM_NO_MEMORY
1366 && rc <= VINF_EM_LAST)
1367 rc = VINF_EM_NO_MEMORY;
1368 }
1369
1370 return rc;
1371}
1372
1373
1374/**
1375 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1376 *
1377 * @returns VBox status code.
1378 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1379 * @param pVCpu The cross context virtual CPU structure.
1380 */
1381static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1382{
1383#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1384 /* Handle the "external interrupt" VM-exit intercept. */
1385 if (CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
1386 {
1387 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1388 AssertMsg( rcStrict != VINF_VMX_VMEXIT
1389 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1390 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1391 return VBOXSTRICTRC_TODO(rcStrict);
1392 }
1393#else
1394 RT_NOREF(pVCpu);
1395#endif
1396 return VINF_NO_CHANGE;
1397}
1398
1399
1400/**
1401 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1402 *
1403 * @returns VBox status code.
1404 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1405 * @param pVCpu The cross context virtual CPU structure.
1406 */
1407static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1408{
1409#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1410 /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
1411 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1412 {
1413 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1414 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1415 if (RT_SUCCESS(rcStrict))
1416 {
1417 AssertMsg( rcStrict != VINF_SVM_VMEXIT
1418 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1419 return VBOXSTRICTRC_VAL(rcStrict);
1420 }
1421
1422 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1423 return VINF_EM_TRIPLE_FAULT;
1424 }
1425#else
1426 NOREF(pVCpu);
1427#endif
1428 return VINF_NO_CHANGE;
1429}
1430
1431
1432/**
1433 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1434 *
1435 * @returns VBox status code.
1436 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1437 * @param pVCpu The cross context virtual CPU structure.
1438 */
1439static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1440{
1441#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1442 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1443 {
1444 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1445 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1446 if (RT_SUCCESS(rcStrict))
1447 {
1448 Assert(rcStrict != VINF_SVM_VMEXIT);
1449 return VBOXSTRICTRC_VAL(rcStrict);
1450 }
1451 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1452 return VINF_EM_TRIPLE_FAULT;
1453 }
1454#else
1455 NOREF(pVCpu);
1456#endif
1457 return VINF_NO_CHANGE;
1458}
1459
1460
1461/**
1462 * Executes all pending forced actions.
1463 *
1464 * Forced actions can cause execution delays and execution
1465 * rescheduling. The first we deal with using action priority, so
1466 * that for instance pending timers aren't scheduled and ran until
1467 * right before execution. The rescheduling we deal with using
1468 * return codes. The same goes for VM termination, only in that case
1469 * we exit everything.
1470 *
1471 * @returns VBox status code of equal or greater importance/severity than rc.
1472 * The most important ones are: VINF_EM_RESCHEDULE,
1473 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1474 *
1475 * @param pVM The cross context VM structure.
1476 * @param pVCpu The cross context virtual CPU structure.
1477 * @param rc The current rc.
1478 *
1479 */
1480int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1481{
1482 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1483#ifdef VBOX_STRICT
1484 int rcIrq = VINF_SUCCESS;
1485#endif
1486 int rc2;
1487#define UPDATE_RC() \
1488 do { \
1489 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1490 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1491 break; \
1492 if (!rc || rc2 < rc) \
1493 rc = rc2; \
1494 } while (0)
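    /* Illustrative example of the merging rule above: if rc is currently
       VINF_EM_RESCHEDULE and a forced action returns VINF_EM_SUSPEND, UPDATE_RC()
       keeps VINF_EM_SUSPEND, the assumption being that the VINF_EM_XXX codes are
       ordered so that the lower (more urgent) value wins. */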
1495 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1496
1497 /*
1498 * Post execution chunk first.
1499 */
1500 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1501 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1502 {
1503 /*
1504 * EMT Rendezvous (must be serviced before termination).
1505 */
1506 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1507 {
1508 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1509 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1510 UPDATE_RC();
1511 /** @todo HACK ALERT! The following test is to make sure EM+TM
1512 * thinks the VM is stopped/reset before the next VM state change
1513 * is made. We need a better solution for this, or at least make it
1514 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1515 * VINF_EM_SUSPEND). */
1516 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1517 {
1518 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1519 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1520 return rc;
1521 }
1522 }
1523
1524 /*
1525 * State change request (cleared by vmR3SetStateLocked).
1526 */
1527 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1528 {
1529 VMSTATE enmState = VMR3GetState(pVM);
1530 switch (enmState)
1531 {
1532 case VMSTATE_FATAL_ERROR:
1533 case VMSTATE_FATAL_ERROR_LS:
1534 case VMSTATE_GURU_MEDITATION:
1535 case VMSTATE_GURU_MEDITATION_LS:
1536 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1537 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1538 return VINF_EM_SUSPEND;
1539
1540 case VMSTATE_DESTROYING:
1541 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1542 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1543 return VINF_EM_TERMINATE;
1544
1545 default:
1546 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1547 }
1548 }
1549
1550 /*
1551 * Debugger Facility polling.
1552 */
1553 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1554 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1555 {
1556 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1557 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1558 /** @todo why that VINF_EM_DBG_EVENT here? Duplicate info, should be handled
1559 * somewhere before we get here, I would think. */
1560 if (rc == VINF_EM_DBG_EVENT) /* HACK! We should've handled pending debug event. */
1561 rc = rc2;
1562 else
1563 UPDATE_RC();
1564 }
1565
1566 /*
1567 * Postponed reset request.
1568 */
1569 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1570 {
1571 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1572 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1573 UPDATE_RC();
1574 }
1575
1576 /*
1577 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1578 */
1579 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1580 {
1581 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1582 UPDATE_RC();
1583 if (rc == VINF_EM_NO_MEMORY)
1584 return rc;
1585 }
1586
1587 /* check that we got them all */
1588 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1589 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1590 }
1591
1592 /*
1593 * Normal priority then.
1594 * (Executed in no particular order.)
1595 */
1596 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1597 {
1598 /*
1599 * PDM Queues are pending.
1600 */
1601 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1602 PDMR3QueueFlushAll(pVM);
1603
1604 /*
1605 * PDM DMA transfers are pending.
1606 */
1607 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1608 PDMR3DmaRun(pVM);
1609
1610 /*
1611 * EMT Rendezvous (make sure they are handled before the requests).
1612 */
1613 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1614 {
1615 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1616 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1617 UPDATE_RC();
1618 /** @todo HACK ALERT! The following test is to make sure EM+TM
1619 * thinks the VM is stopped/reset before the next VM state change
1620 * is made. We need a better solution for this, or at least make it
1621 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1622 * VINF_EM_SUSPEND). */
1623 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1624 {
1625 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1626 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1627 return rc;
1628 }
1629 }
1630
1631 /*
1632 * Requests from other threads.
1633 */
1634 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1635 {
1636 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1637 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1638 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1639 {
1640 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1641 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1642 return rc2;
1643 }
1644 UPDATE_RC();
1645 /** @todo HACK ALERT! The following test is to make sure EM+TM
1646 * thinks the VM is stopped/reset before the next VM state change
1647 * is made. We need a better solution for this, or at least make it
1648 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1649 * VINF_EM_SUSPEND). */
1650 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1651 {
1652 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1653 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1654 return rc;
1655 }
1656 }
1657
1658 /* check that we got them all */
1659 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS));
1660 }
1661
1662 /*
1663 * Normal priority then. (per-VCPU)
1664 * (Executed in no particular order.)
1665 */
1666 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1667 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1668 {
1669 /*
1670 * Requests from other threads.
1671 */
1672 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1673 {
1674 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1675 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1676 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1677 {
1678 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1679 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1680 return rc2;
1681 }
1682 UPDATE_RC();
1683 /** @todo HACK ALERT! The following test is to make sure EM+TM
1684 * thinks the VM is stopped/reset before the next VM state change
1685 * is made. We need a better solution for this, or at least make it
1686 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1687 * VINF_EM_SUSPEND). */
1688 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1689 {
1690 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1691 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1692 return rc;
1693 }
1694 }
1695
1696 /* check that we got them all */
1697 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1698 }
1699
1700 /*
1701 * High priority pre execution chunk last.
1702 * (Executed in ascending priority order.)
1703 */
1704 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1705 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1706 {
1707 /*
1708 * Timers before interrupts.
1709 */
1710 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1711 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1712 TMR3TimerQueuesDo(pVM);
1713
1714 /*
1715 * Pick up asynchronously posted interrupts into the APIC.
1716 */
1717 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1718 APICUpdatePendingInterrupts(pVCpu);
1719
1720 /*
1721 * The instruction following an emulated STI should *always* be executed!
1722 *
1723 * Note! We intentionally don't clear CPUMCTX_INHIBIT_INT here if
1724 * the eip is the same as the inhibited instr address. Before we
1725 * are able to execute this instruction in raw mode (iret to
1726 * guest code) an external interrupt might force a world switch
1727 * again. Possibly allowing a guest interrupt to be dispatched
1728 * in the process. This could break the guest. Sounds very
1729 * unlikely, but such timing-sensitive problems are not as rare as
1730 * you might think.
1731 *
1732 * Note! This used to be a force action flag. Can probably ditch this code.
1733 */
1734 if ( CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx)
1735 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1736 {
1737 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_INHIBIT_INT);
1738 if (CPUMGetGuestRIP(pVCpu) != pVCpu->cpum.GstCtx.uRipInhibitInt)
1739 {
1740 CPUMClearInterruptShadow(&pVCpu->cpum.GstCtx);
1741 Log(("Clearing CPUMCTX_INHIBIT_INT at %RGv - successor %RGv\n",
1742 (RTGCPTR)CPUMGetGuestRIP(pVCpu), (RTGCPTR)pVCpu->cpum.GstCtx.uRipInhibitInt));
1743 }
1744 else
1745 Log(("Leaving CPUMCTX_INHIBIT_INT set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1746 }
1747
1748 /** @todo SMIs. If we implement SMIs, this is where they will have to be
1749 * delivered. */
1750
1751#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1752 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER))
1753 {
1754 /*
1755 * VMX Nested-guest APIC-write pending (can cause VM-exits).
1756 * Takes priority even over SMI and INIT signals.
1757 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
1758 */
1759 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
1760 {
1761 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
1762 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1763 UPDATE_RC();
1764 }
1765
1766 /*
1767 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
1768 * Takes priority over "Traps on the previous instruction".
1769 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
1770 */
1771 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
1772 {
1773 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
1774 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1775 UPDATE_RC();
1776 }
1777
1778 /*
1779 * VMX Nested-guest preemption timer VM-exit.
1780 * Takes priority over NMI-window VM-exits.
1781 */
1782 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
1783 {
1784 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
1785 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1786 UPDATE_RC();
1787 }
1788 Assert(!VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER));
1789 }
1790#endif
1791
1792 /*
1793 * Guest event injection.
1794 */
1795 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
1796 bool fWakeupPending = false;
1797 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW | VMCPU_FF_VMX_INT_WINDOW
1798 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_NESTED_GUEST
1799 | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1800 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1801 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1802 && !CPUMIsInInterruptShadow(&pVCpu->cpum.GstCtx) /* Interrupt shadows block both NMIs and interrupts. */
1803 /** @todo r=bird: But interrupt shadows probably do not block vmexits due to host interrupts... */
1804 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
1805 {
1806 if (CPUMGetGuestGif(&pVCpu->cpum.GstCtx))
1807 {
1808 bool fInVmxNonRootMode;
1809 bool fInSvmHwvirtMode;
1810 if (!CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx))
1811 {
1812 fInVmxNonRootMode = false;
1813 fInSvmHwvirtMode = false;
1814 }
1815 else
1816 {
1817 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1818 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1819 }
1820
1821 if (0)
1822 { }
1823#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1824 /*
1825 * VMX NMI-window VM-exit.
1826 * Takes priority over non-maskable interrupts (NMIs).
1827 * Interrupt shadows block NMI-window VM-exits.
1828 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
1829 *
1830 * See Intel spec. 25.2 "Other Causes Of VM Exits".
1831 * See Intel spec. 26.7.6 "NMI-Window Exiting".
1832 */
1833 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1834 && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1835 {
1836 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
1837 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1838 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
1839 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1840 && rc2 != VINF_VMX_VMEXIT
1841 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1842 UPDATE_RC();
1843 }
1844#endif
1845 /*
1846 * NMIs (take priority over external interrupts).
1847 */
1848 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1849 && !CPUMAreInterruptsInhibitedByNmi(&pVCpu->cpum.GstCtx))
1850 {
1851#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1852 if ( fInVmxNonRootMode
1853 && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
1854 {
1855 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
1856 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1857 UPDATE_RC();
1858 }
1859 else
1860#endif
1861#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1862 if ( fInSvmHwvirtMode
1863 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
1864 {
1865 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
1866 AssertMsg( rc2 != VINF_SVM_VMEXIT
1867 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1868 UPDATE_RC();
1869 }
1870 else
1871#endif
1872 {
1873 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
1874 if (rc2 == VINF_SUCCESS)
1875 {
1876 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1877 fWakeupPending = true;
1878 if (pVM->em.s.fIemExecutesAll)
1879 rc2 = VINF_EM_RESCHEDULE;
1880 else
1881 {
1882 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
1883 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
1884 : VINF_EM_RESCHEDULE_REM;
1885 }
1886 }
1887 UPDATE_RC();
1888 }
1889 }
1890#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1891 /*
1892 * VMX Interrupt-window VM-exits.
1893 * Takes priority over external interrupts.
1894 */
1895 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
1896 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1897 {
1898 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
1899 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1900 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
1901 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1902 && rc2 != VINF_VMX_VMEXIT
1903 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1904 UPDATE_RC();
1905 }
1906#endif
1907#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1908 /** @todo NSTSVM: Handle this for SVM here too later, rather than when an interrupt is
1909 * actually pending like we currently do. */
1910#endif
1911 /*
1912 * External interrupts.
1913 */
1914 else
1915 {
1916 /*
1917 * VMX: virtual interrupts take priority over physical interrupts.
1918 * SVM: physical interrupts take priority over virtual interrupts.
1919 */
1920 if ( fInVmxNonRootMode
1921 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1922 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
1923 {
1924 /** @todo NSTVMX: virtual-interrupt delivery. */
1925 rc2 = VINF_SUCCESS;
1926 }
1927 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1928 && CPUMIsGuestPhysIntrEnabled(pVCpu))
1929 {
1930 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1931 if (fInVmxNonRootMode)
1932 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
1933 else if (fInSvmHwvirtMode)
1934 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
1935 else
1936 rc2 = VINF_NO_CHANGE;
1937
1938 if (rc2 == VINF_NO_CHANGE)
1939 {
1940 bool fInjected = false;
1941 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1942 /** @todo this really isn't nice, should properly handle this */
1943 /* Note! This can still cause a VM-exit (on Intel). */
1944 LogFlow(("Calling TRPMR3InjectEvent: %04x:%08RX64 efl=%#x\n",
1945 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
1946 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
1947 fWakeupPending = true;
1948 if ( pVM->em.s.fIemExecutesAll
1949 && ( rc2 == VINF_EM_RESCHEDULE_REM
1950 || rc2 == VINF_EM_RESCHEDULE_HM
1951 || rc2 == VINF_EM_RESCHEDULE_RAW))
1952 {
1953 rc2 = VINF_EM_RESCHEDULE;
1954 }
1955#ifdef VBOX_STRICT
1956 if (fInjected)
1957 rcIrq = rc2;
1958#endif
1959 }
1960 UPDATE_RC();
1961 }
1962 else if ( fInSvmHwvirtMode
1963 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1964 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
1965 {
1966 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
1967 if (rc2 == VINF_NO_CHANGE)
1968 {
1969 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1970 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
1971 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
1972 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1973 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1974 rc2 = VINF_EM_RESCHEDULE;
1975#ifdef VBOX_STRICT
1976 rcIrq = rc2;
1977#endif
1978 }
1979 UPDATE_RC();
1980 }
1981 }
1982 } /* CPUMGetGuestGif */
1983 }
1984
1985 /*
1986 * Allocate handy pages.
1987 */
1988 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1989 {
1990 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1991 UPDATE_RC();
1992 }
1993
1994 /*
1995 * Debugger Facility request.
1996 */
1997 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1998 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1999 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
2000 {
2001 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2002 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2003 UPDATE_RC();
2004 }
2005
2006 /*
2007 * EMT Rendezvous (must be serviced before termination).
2008 */
2009 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2010 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2011 {
2012 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2013 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2014 UPDATE_RC();
2015 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2016 * stopped/reset before the next VM state change is made. We need a better
2017 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2018 * && rc <= VINF_EM_SUSPEND). */
2019 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2020 {
2021 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2022 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2023 return rc;
2024 }
2025 }
2026
2027 /*
2028 * State change request (cleared by vmR3SetStateLocked).
2029 */
2030 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2031 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2032 {
2033 VMSTATE enmState = VMR3GetState(pVM);
2034 switch (enmState)
2035 {
2036 case VMSTATE_FATAL_ERROR:
2037 case VMSTATE_FATAL_ERROR_LS:
2038 case VMSTATE_GURU_MEDITATION:
2039 case VMSTATE_GURU_MEDITATION_LS:
2040 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2041 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2042 return VINF_EM_SUSPEND;
2043
2044 case VMSTATE_DESTROYING:
2045 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2046 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2047 return VINF_EM_TERMINATE;
2048
2049 default:
2050 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2051 }
2052 }
2053
2054 /*
2055 * Out of memory? Since most of our fellow high priority actions may cause us
2056 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2057 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2058 * than us since we can terminate without allocating more memory.
2059 */
2060 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2061 {
2062 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2063 UPDATE_RC();
2064 if (rc == VINF_EM_NO_MEMORY)
2065 return rc;
2066 }
2067
2068 /*
2069 * If the virtual sync clock is still stopped, make TM restart it.
2070 */
2071 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2072 TMR3VirtualSyncFF(pVM, pVCpu);
2073
2074#ifdef DEBUG
2075 /*
2076 * Debug, pause the VM.
2077 */
2078 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2079 {
2080 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2081 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2082 return VINF_EM_SUSPEND;
2083 }
2084#endif
2085
2086 /* check that we got them all */
2087 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2088 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2089 }
2090
2091#undef UPDATE_RC
2092 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2093 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2094 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2095 return rc;
2096}
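
A self-contained sketch of the status-code merging rule that the UPDATE_RC macro at the top of emR3ForcedActions implements: VINF_SUCCESS never overrides anything, a pending error always wins, and among the informational EM codes the numerically smaller (more important) one is kept. The numeric values below are placeholders for illustration, not the real VINF_EM_* constants.

#include <stdio.h>

/* Placeholder status codes ordered like the real ones: a smaller positive
   value means a more important informational EM status. */
#define SK_VINF_SUCCESS        0
#define SK_VINF_EM_SUSPEND     1102
#define SK_VINF_EM_RESCHEDULE  1115

static int skUpdateRc(int rc, int rc2)
{
    if (rc2 == SK_VINF_SUCCESS || rc < SK_VINF_SUCCESS)
        return rc;              /* nothing new, or an error is already pending */
    if (rc == SK_VINF_SUCCESS || rc2 < rc)
        return rc2;             /* rc2 outranks what we already have           */
    return rc;
}

int main(void)
{
    int rc = SK_VINF_SUCCESS;
    rc = skUpdateRc(rc, SK_VINF_EM_RESCHEDULE);   /* reschedule requested        */
    rc = skUpdateRc(rc, SK_VINF_EM_SUSPEND);      /* suspend outranks reschedule */
    rc = skUpdateRc(rc, SK_VINF_SUCCESS);         /* no-op                       */
    printf("merged rc=%d\n", rc);                 /* -> 1102 (suspend)           */
    return 0;
}
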
2097
2098
2099/**
2100 * Check whether the preset execution time cap still allows guest execution to be scheduled.
2101 *
2102 * @returns true if allowed, false otherwise
2103 * @param pVM The cross context VM structure.
2104 * @param pVCpu The cross context virtual CPU structure.
2105 */
2106bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2107{
2108 uint64_t u64UserTime, u64KernelTime;
2109
2110 if ( pVM->uCpuExecutionCap != 100
2111 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2112 {
2113 uint64_t u64TimeNow = RTTimeMilliTS();
2114 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2115 {
2116 /* New time slice. */
2117 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2118 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2119 pVCpu->em.s.u64TimeSliceExec = 0;
2120 }
2121 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2122
2123 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2124 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2125 return false;
2126 }
2127 return true;
2128}
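
The cap arithmetic above is simple but easy to misread: within each EM_TIME_SLICE millisecond window, the EMT may consume at most (EM_TIME_SLICE * uCpuExecutionCap) / 100 milliseconds of combined kernel plus user CPU time before execution is disallowed for the rest of the slice. A standalone sketch of that check, assuming a 100 ms slice for illustration (the real EM_TIME_SLICE value may differ):

#include <stdint.h>
#include <stdio.h>

#define SK_EM_TIME_SLICE 100 /* ms per slice, assumed for illustration */

/* Returns non-zero while the EMT is still within its per-slice CPU budget. */
static int skIsExecAllowed(uint32_t uCpuExecutionCap, uint64_t cMsExecutedThisSlice)
{
    if (uCpuExecutionCap == 100)
        return 1; /* no cap configured */
    return cMsExecutedThisSlice < (SK_EM_TIME_SLICE * uCpuExecutionCap) / 100;
}

int main(void)
{
    printf("%d\n", skIsExecAllowed(40, 35)); /* 1: under the 40 ms budget    */
    printf("%d\n", skIsExecAllowed(40, 45)); /* 0: budget exhausted, back off */
    return 0;
}
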
2129
2130
2131/**
2132 * Execute VM.
2133 *
2134 * This function is the main loop of the VM. The emulation thread
2135 * calls this function when the VM has been successfully constructed
2136 * and we're ready to execute the VM.
2137 *
2138 * Returning from this function means that the VM is turned off or
2139 * suspended (state already saved) and deconstruction is next in line.
2140 *
2141 * All interaction from other threads is done using forced actions
2142 * and signalling of the wait object.
2143 *
2144 * @returns VBox status code, informational status codes may indicate failure.
2145 * @param pVM The cross context VM structure.
2146 * @param pVCpu The cross context virtual CPU structure.
2147 */
2148VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2149{
2150 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2151 pVM,
2152 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2153 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2154 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2155 VM_ASSERT_EMT(pVM);
2156 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2157 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2158 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2159 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2160
2161 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2162 if (rc == 0)
2163 {
2164 /*
2165 * Start the virtual time.
2166 */
2167 TMR3NotifyResume(pVM, pVCpu);
2168
2169 /*
2170 * The Outer Main Loop.
2171 */
2172 bool fFFDone = false;
2173
2174 /* Reschedule right away to start in the right state. */
2175 rc = VINF_SUCCESS;
2176
2177        /* If resuming after a pause or a state load, restore the previous
2178           state (or else we'd start executing code); otherwise just reschedule. */
2179 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2180 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2181 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2182 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2183 else
2184 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2185 pVCpu->em.s.cIemThenRemInstructions = 0;
2186 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2187
2188 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2189 for (;;)
2190 {
2191 /*
2192 * Before we can schedule anything (we're here because
2193 * scheduling is required) we must service any pending
2194 * forced actions to avoid any pending action causing
2195 * immediate rescheduling upon entering an inner loop.
2196 *
2197 * Do forced actions.
2198 */
2199 if ( !fFFDone
2200 && RT_SUCCESS(rc)
2201 && rc != VINF_EM_TERMINATE
2202 && rc != VINF_EM_OFF
2203 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2204 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2205 {
2206 rc = emR3ForcedActions(pVM, pVCpu, rc);
2207 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2208 }
2209 else if (fFFDone)
2210 fFFDone = false;
2211
2212#ifdef VBOX_STRICT
2213 CPUMAssertGuestRFlagsCookie(pVM, pVCpu);
2214#endif
2215
2216 /*
2217 * Now what to do?
2218 */
2219 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2220 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2221 switch (rc)
2222 {
2223 /*
2224 * Keep doing what we're currently doing.
2225 */
2226 case VINF_SUCCESS:
2227 break;
2228
2229 /*
2230 * Reschedule - to raw-mode execution.
2231 */
2232/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2233 case VINF_EM_RESCHEDULE_RAW:
2234 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2235 AssertLogRelFailed();
2236 pVCpu->em.s.enmState = EMSTATE_NONE;
2237 break;
2238
2239 /*
2240 * Reschedule - to HM or NEM.
2241 */
2242 case VINF_EM_RESCHEDULE_HM:
2243 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2244 if (VM_IS_HM_ENABLED(pVM))
2245 {
2246 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
2247 {
2248 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2249 pVCpu->em.s.enmState = EMSTATE_HM;
2250 }
2251 else
2252 {
2253 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_IEM_THEN_REM)\n", enmOldState, EMSTATE_IEM_THEN_REM));
2254 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2255 }
2256 }
2257 else if (VM_IS_NEM_ENABLED(pVM))
2258 {
2259 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2260 pVCpu->em.s.enmState = EMSTATE_NEM;
2261 }
2262 else
2263 {
2264 AssertLogRelFailed();
2265 pVCpu->em.s.enmState = EMSTATE_NONE;
2266 }
2267 break;
2268
2269 /*
2270 * Reschedule - to recompiled execution.
2271 */
2272 case VINF_EM_RESCHEDULE_REM:
2273 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2274 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2275 enmOldState, EMSTATE_IEM_THEN_REM));
2276 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2277 {
2278 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2279 pVCpu->em.s.cIemThenRemInstructions = 0;
2280 }
2281 break;
2282
2283 /*
2284 * Resume.
2285 */
2286 case VINF_EM_RESUME:
2287 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2288 /* Don't reschedule in the halted or wait for SIPI case. */
2289 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2290 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2291 {
2292 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2293 break;
2294 }
2295 /* fall through and get scheduled. */
2296 RT_FALL_THRU();
2297
2298 /*
2299 * Reschedule.
2300 */
2301 case VINF_EM_RESCHEDULE:
2302 {
2303 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2304 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2305 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2306 pVCpu->em.s.cIemThenRemInstructions = 0;
2307 pVCpu->em.s.enmState = enmState;
2308 break;
2309 }
2310
2311 /*
2312 * Halted.
2313 */
2314 case VINF_EM_HALT:
2315 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2316 pVCpu->em.s.enmState = EMSTATE_HALTED;
2317 break;
2318
2319 /*
2320 * Switch to the wait for SIPI state (application processor only)
2321 */
2322 case VINF_EM_WAIT_SIPI:
2323 Assert(pVCpu->idCpu != 0);
2324 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2325 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2326 break;
2327
2328
2329 /*
2330 * Suspend.
2331 */
2332 case VINF_EM_SUSPEND:
2333 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2334 Assert(enmOldState != EMSTATE_SUSPENDED);
2335 pVCpu->em.s.enmPrevState = enmOldState;
2336 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2337 break;
2338
2339 /*
2340 * Reset.
2341 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2342 */
2343 case VINF_EM_RESET:
2344 {
2345 if (pVCpu->idCpu == 0)
2346 {
2347 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2348 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2349 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2350 pVCpu->em.s.cIemThenRemInstructions = 0;
2351 pVCpu->em.s.enmState = enmState;
2352 }
2353 else
2354 {
2355 /* All other VCPUs go into the wait for SIPI state. */
2356 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2357 }
2358 break;
2359 }
2360
2361 /*
2362 * Power Off.
2363 */
2364 case VINF_EM_OFF:
2365 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2366 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2367 TMR3NotifySuspend(pVM, pVCpu);
2368 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2369 return rc;
2370
2371 /*
2372 * Terminate the VM.
2373 */
2374 case VINF_EM_TERMINATE:
2375 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2376 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2377 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2378 TMR3NotifySuspend(pVM, pVCpu);
2379 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2380 return rc;
2381
2382
2383 /*
2384 * Out of memory, suspend the VM and stuff.
2385 */
2386 case VINF_EM_NO_MEMORY:
2387 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2388 Assert(enmOldState != EMSTATE_SUSPENDED);
2389 pVCpu->em.s.enmPrevState = enmOldState;
2390 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2391 TMR3NotifySuspend(pVM, pVCpu);
2392 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2393
2394 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2395 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2396 if (rc != VINF_EM_SUSPEND)
2397 {
2398 if (RT_SUCCESS_NP(rc))
2399 {
2400 AssertLogRelMsgFailed(("%Rrc\n", rc));
2401 rc = VERR_EM_INTERNAL_ERROR;
2402 }
2403 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2404 }
2405 return rc;
2406
2407 /*
2408 * Guest debug events.
2409 */
2410 case VINF_EM_DBG_STEPPED:
2411 case VINF_EM_DBG_STOP:
2412 case VINF_EM_DBG_EVENT:
2413 case VINF_EM_DBG_BREAKPOINT:
2414 case VINF_EM_DBG_STEP:
2415 if (enmOldState == EMSTATE_RAW)
2416 {
2417 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2418 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2419 }
2420 else if (enmOldState == EMSTATE_HM)
2421 {
2422 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2423 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2424 }
2425 else if (enmOldState == EMSTATE_NEM)
2426 {
2427 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2428 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2429 }
2430 else if (enmOldState == EMSTATE_REM)
2431 {
2432 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2433 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2434 }
2435 else
2436 {
2437 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2438 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2439 }
2440 break;
2441
2442 /*
2443 * Hypervisor debug events.
2444 */
2445 case VINF_EM_DBG_HYPER_STEPPED:
2446 case VINF_EM_DBG_HYPER_BREAKPOINT:
2447 case VINF_EM_DBG_HYPER_ASSERTION:
2448 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2449 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2450 break;
2451
2452 /*
2453 * Triple fault.
2454 */
2455 case VINF_EM_TRIPLE_FAULT:
2456 if (!pVM->em.s.fGuruOnTripleFault)
2457 {
2458 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2459 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2460 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2461 continue;
2462 }
2463 /* Else fall through and trigger a guru. */
2464 RT_FALL_THRU();
2465
2466 case VERR_VMM_RING0_ASSERTION:
2467 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2468 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2469 break;
2470
2471 /*
2472 * Any error code showing up here other than the ones we
2473 * know and process above is considered to be FATAL.
2474 *
2475 * Unknown warnings and informational status codes are also
2476 * included in this.
2477 */
2478 default:
2479 if (RT_SUCCESS_NP(rc))
2480 {
2481 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2482 rc = VERR_EM_INTERNAL_ERROR;
2483 }
2484 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2485 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2486 break;
2487 }
2488
2489 /*
2490 * Act on state transition.
2491 */
2492 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2493 if (enmOldState != enmNewState)
2494 {
2495 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2496
2497 /* Clear MWait flags and the unhalt FF. */
2498 if ( enmOldState == EMSTATE_HALTED
2499 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2500 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2501 && ( enmNewState == EMSTATE_RAW
2502 || enmNewState == EMSTATE_HM
2503 || enmNewState == EMSTATE_NEM
2504 || enmNewState == EMSTATE_REM
2505 || enmNewState == EMSTATE_IEM_THEN_REM
2506 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2507 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2508 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2509 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2510 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2511 {
2512 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2513 {
2514 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2515 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2516 }
2517 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2518 {
2519 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2520 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2521 }
2522 }
2523 }
2524 else
2525 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2526
2527 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2528 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2529
2530 /*
2531 * Act on the new state.
2532 */
2533 switch (enmNewState)
2534 {
2535 /*
2536 * Execute raw.
2537 */
2538 case EMSTATE_RAW:
2539 AssertLogRelMsgFailed(("%Rrc\n", rc));
2540 rc = VERR_EM_INTERNAL_ERROR;
2541 break;
2542
2543 /*
2544 * Execute hardware accelerated raw.
2545 */
2546 case EMSTATE_HM:
2547 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2548 break;
2549
2550 /*
2551 * Execute using the native execution manager (NEM).
2552 */
2553 case EMSTATE_NEM:
2554 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2555 break;
2556
2557 /*
2558 * Execute recompiled.
2559 */
2560 case EMSTATE_REM:
2561 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2562 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2563 break;
2564
2565 /*
2566 * Execute in the interpreter.
2567 */
2568 case EMSTATE_IEM:
2569 {
2570 uint32_t cInstructions = 0;
2571#if 0 /* For testing purposes. */
2572 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2573 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2574 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2575 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2576 rc = VINF_SUCCESS;
2577 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2578#endif
2579 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2580 if (pVM->em.s.fIemExecutesAll)
2581 {
2582 Assert(rc != VINF_EM_RESCHEDULE_REM);
2583 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2584 Assert(rc != VINF_EM_RESCHEDULE_HM);
2585#ifdef VBOX_HIGH_RES_TIMERS_HACK
2586 if (cInstructions < 2048)
2587 TMTimerPollVoid(pVM, pVCpu);
2588#endif
2589 }
2590 fFFDone = false;
2591 break;
2592 }
2593
2594 /*
2595 * Execute in IEM, hoping we can quickly switch back to HM
2596 * or RAW execution. If our hopes fail, we go to REM.
2597 */
2598 case EMSTATE_IEM_THEN_REM:
2599 {
2600 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2601 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2602 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2603 break;
2604 }
2605
2606 /*
2607 * Application processor execution halted until SIPI.
2608 */
2609 case EMSTATE_WAIT_SIPI:
2610 /* no break */
2611 /*
2612 * hlt - execution halted until interrupt.
2613 */
2614 case EMSTATE_HALTED:
2615 {
2616 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2617                /* If HM (or someone else) stores a pending interrupt in
2618                   TRPM, it must be dispatched ASAP without any halting.
2619                   Anything pending in TRPM has been accepted and the CPU
2620                   should already be in the right state to receive it. */
2621 if (TRPMHasTrap(pVCpu))
2622 rc = VINF_EM_RESCHEDULE;
2623 /* MWAIT has a special extension where it's woken up when
2624 an interrupt is pending even when IF=0. */
2625 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2626 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2627 {
2628 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2629 if (rc == VINF_SUCCESS)
2630 {
2631 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2632 APICUpdatePendingInterrupts(pVCpu);
2633
2634 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2635 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2636 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2637 {
2638 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2639 rc = VINF_EM_RESCHEDULE;
2640 }
2641 }
2642 }
2643 else
2644 {
2645 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2646 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2647 check VMCPU_FF_UPDATE_APIC here. */
2648 if ( rc == VINF_SUCCESS
2649 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2650 {
2651 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2652 rc = VINF_EM_RESCHEDULE;
2653 }
2654 }
2655
2656 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2657 break;
2658 }
2659
2660 /*
2661 * Suspended - return to VM.cpp.
2662 */
2663 case EMSTATE_SUSPENDED:
2664 TMR3NotifySuspend(pVM, pVCpu);
2665 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2666 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2667 return VINF_EM_SUSPEND;
2668
2669 /*
2670 * Debugging in the guest.
2671 */
2672 case EMSTATE_DEBUG_GUEST_RAW:
2673 case EMSTATE_DEBUG_GUEST_HM:
2674 case EMSTATE_DEBUG_GUEST_NEM:
2675 case EMSTATE_DEBUG_GUEST_IEM:
2676 case EMSTATE_DEBUG_GUEST_REM:
2677 TMR3NotifySuspend(pVM, pVCpu);
2678 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2679 TMR3NotifyResume(pVM, pVCpu);
2680 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2681 break;
2682
2683 /*
2684 * Debugging in the hypervisor.
2685 */
2686 case EMSTATE_DEBUG_HYPER:
2687 {
2688 TMR3NotifySuspend(pVM, pVCpu);
2689 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2690
2691 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2692 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2693 if (rc != VINF_SUCCESS)
2694 {
2695 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2696 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2697 else
2698 {
2699 /* switch to guru meditation mode */
2700 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2701 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2702 VMMR3FatalDump(pVM, pVCpu, rc);
2703 }
2704 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2705 return rc;
2706 }
2707
2708 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2709 TMR3NotifyResume(pVM, pVCpu);
2710 break;
2711 }
2712
2713 /*
2714 * Guru meditation takes place in the debugger.
2715 */
2716 case EMSTATE_GURU_MEDITATION:
2717 {
2718 TMR3NotifySuspend(pVM, pVCpu);
2719 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2720 VMMR3FatalDump(pVM, pVCpu, rc);
2721 emR3Debug(pVM, pVCpu, rc);
2722 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2723 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2724 return rc;
2725 }
2726
2727 /*
2728 * The states we don't expect here.
2729 */
2730 case EMSTATE_NONE:
2731 case EMSTATE_TERMINATING:
2732 default:
2733 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2734 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2735 TMR3NotifySuspend(pVM, pVCpu);
2736 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2737 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2738 return VERR_EM_INTERNAL_ERROR;
2739 }
2740 } /* The Outer Main Loop */
2741 }
2742 else
2743 {
2744 /*
2745 * Fatal error.
2746 */
2747 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2748 TMR3NotifySuspend(pVM, pVCpu);
2749 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2750 VMMR3FatalDump(pVM, pVCpu, rc);
2751 emR3Debug(pVM, pVCpu, rc);
2752 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2753 /** @todo change the VM state! */
2754 return rc;
2755 }
2756
2757 /* not reached */
2758}
2759
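
To make the control flow of the outer main loop in EMR3ExecuteVM easier to follow, here is a heavily simplified standalone sketch of its shape: service forced actions, map the resulting status code onto an execution state, bail out on terminal states, and otherwise run the inner executor for the current state. All names and numeric codes below are placeholders for illustration, not the real EM API.

#include <stdio.h>

typedef enum { SK_STATE_HM, SK_STATE_HALTED, SK_STATE_TERMINATING } SKSTATE;

#define SK_VINF_SUCCESS        0
#define SK_VINF_EM_HALT        1
#define SK_VINF_EM_RESCHEDULE  2
#define SK_VINF_EM_OFF         3

static int skForcedActions(int rc)     { return rc; }                              /* stub: nothing pending   */
static int skExecute(SKSTATE enmState) { (void)enmState; return SK_VINF_EM_OFF; }  /* stub: guest powers off  */

int main(void)
{
    SKSTATE enmState = SK_STATE_HM;
    int     rc       = SK_VINF_SUCCESS;
    for (;;)
    {
        rc = skForcedActions(rc);                   /* 1. service pending forced actions    */
        switch (rc)                                 /* 2. act on the returned status code   */
        {
            case SK_VINF_EM_HALT:       enmState = SK_STATE_HALTED;      break;
            case SK_VINF_EM_RESCHEDULE: enmState = SK_STATE_HM;          break;
            case SK_VINF_EM_OFF:        enmState = SK_STATE_TERMINATING; break;
            default:                    break;      /* keep doing what we're doing          */
        }
        if (enmState == SK_STATE_TERMINATING)       /* 3. terminal state: leave the loop    */
        {
            printf("VM powered off\n");
            return 0;
        }
        rc = skExecute(enmState);                   /* 4. run the inner loop for this state */
    }
}
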