source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@93876

Last change on this file since 93876 was 93718, checked in by vboxsync, 3 years ago

VMM/MM: Removed the hyper heap. bugref:10093 bugref:9517

1/* $Id: EM.cpp 93718 2022-02-14 11:09:36Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
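/*
 * Illustrative sketch only (not built): the rough shape of the EMR3ExecuteVM()
 * 'main-loop' described above, dispatching on the per-CPU EM state to the mode
 * specific inner loops.  The exact helper names and signatures are assumptions
 * based on the documentation and statistics strings elsewhere in this file.
 *
 *     for (;;)
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_HM:           rc = emR3HmExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_NEM:          rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone)); break;
 *             case EMSTATE_IEM_THEN_REM: rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone)); break;
 *             case EMSTATE_REM:          rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_HALTED:       rc = VMR3WaitHalted(pVM, pVCpu, fFlags);  break;
 *             default: break; * debug states, WAIT_SIPI, SUSPENDED, GURU_MEDITATION, ...
 *         }
 *     * forced actions, rescheduling and state transitions happen between iterations.
 */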
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/vmm.h>
42#include <VBox/vmm/selm.h>
43#include <VBox/vmm/trpm.h>
44#include <VBox/vmm/iem.h>
45#include <VBox/vmm/nem.h>
46#include <VBox/vmm/iom.h>
47#include <VBox/vmm/dbgf.h>
48#include <VBox/vmm/pgm.h>
49#include <VBox/vmm/apic.h>
50#include <VBox/vmm/tm.h>
51#include <VBox/vmm/mm.h>
52#include <VBox/vmm/ssm.h>
53#include <VBox/vmm/pdmapi.h>
54#include <VBox/vmm/pdmcritsect.h>
55#include <VBox/vmm/pdmqueue.h>
56#include <VBox/vmm/hm.h>
57#include "EMInternal.h"
58#include <VBox/vmm/vm.h>
59#include <VBox/vmm/uvm.h>
60#include <VBox/vmm/cpumdis.h>
61#include <VBox/dis.h>
62#include <VBox/disopcode.h>
63#include <VBox/err.h>
64#include "VMMTracing.h"
65
66#include <iprt/asm.h>
67#include <iprt/string.h>
68#include <iprt/stream.h>
69#include <iprt/thread.h>
70
71
72/*********************************************************************************************************************************
73* Internal Functions *
74*********************************************************************************************************************************/
75static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
76static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
77#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
78static const char *emR3GetStateName(EMSTATE enmState);
79#endif
80static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
81#if defined(VBOX_WITH_REM) || defined(DEBUG)
82static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
83#endif
84static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
85
86
87/**
88 * Initializes the EM.
89 *
90 * @returns VBox status code.
91 * @param pVM The cross context VM structure.
92 */
93VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
94{
95 LogFlow(("EMR3Init\n"));
96 /*
97 * Assert alignment and sizes.
98 */
99 AssertCompileMemberAlignment(VM, em.s, 32);
100 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
101 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
102 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
103
104 /*
105 * Init the structure.
106 */
107 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
108 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
109
110 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
111 AssertLogRelRCReturn(rc, rc);
112
113 bool fEnabled;
114 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
115 AssertLogRelRCReturn(rc, rc);
116 pVM->em.s.fGuruOnTripleFault = !fEnabled;
117 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
118 {
119 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
120 pVM->em.s.fGuruOnTripleFault = true;
121 }
122
123 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
124
125 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
126 * Whether to try to correlate exit history in any context, detect hot spots and
127 * try to optimize these using IEM if there are other exits close by. This
128 * overrides the context specific settings. */
129 bool fExitOptimizationEnabled = true;
130 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
131 AssertLogRelRCReturn(rc, rc);
132
133 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
134 * Whether to optimize exits in ring-0. Setting this to false will also disable
135 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
136 * capabilities of the host kernel, this optimization may be unavailable. */
137 bool fExitOptimizationEnabledR0 = true;
138 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
139 AssertLogRelRCReturn(rc, rc);
140 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
141
142 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
143 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
144 * hooks are in effect). */
145 /** @todo change the default to true here */
146 bool fExitOptimizationEnabledR0PreemptDisabled = true;
147 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
148 AssertLogRelRCReturn(rc, rc);
149 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
150
151 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
152 * Maximum number of instructions to let EMHistoryExec execute in one go. */
153 uint16_t cHistoryExecMaxInstructions = 8192;
154 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
155 AssertLogRelRCReturn(rc, rc);
156 if (cHistoryExecMaxInstructions < 16)
157 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
158
159 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
160 * Maximum number of instructions between exits during probing. */
161 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
162#ifdef RT_OS_WINDOWS
163 if (VM_IS_NEM_ENABLED(pVM))
164 cHistoryProbeMaxInstructionsWithoutExit = 32;
165#endif
166 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
167 cHistoryProbeMaxInstructionsWithoutExit);
168 AssertLogRelRCReturn(rc, rc);
169 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
170 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
171 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 16");
172
173 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
174 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
175 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
176 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
177 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
178 cHistoryProbeMinInstructions);
179 AssertLogRelRCReturn(rc, rc);
180
181 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
182 {
183 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
184 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
185 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
186 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
187 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
188 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
189 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
190 }
191
192 /*
193 * Saved state.
194 */
195 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
196 NULL, NULL, NULL,
197 NULL, emR3Save, NULL,
198 NULL, emR3Load, NULL);
199 if (RT_FAILURE(rc))
200 return rc;
201
202 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
203 {
204 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
205
206 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
207 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
208 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
209 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
210
211# define EM_REG_COUNTER(a, b, c) \
212 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
213 AssertRC(rc);
214
215# define EM_REG_COUNTER_USED(a, b, c) \
216 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
217 AssertRC(rc);
218
219# define EM_REG_PROFILE(a, b, c) \
220 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
221 AssertRC(rc);
222
223# define EM_REG_PROFILE_ADV(a, b, c) \
224 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
225 AssertRC(rc);
226
227 /*
228 * Statistics.
229 */
230#ifdef VBOX_WITH_STATISTICS
231 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
232 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
233
234 /* these should be considered for release statistics. */
235 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
236 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
237 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
238 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
239 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
240 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
241 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
242 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
243#endif /* VBOX_WITH_STATISTICS */
244 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
245 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
246#ifdef VBOX_WITH_STATISTICS
247 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%u/EM/REMEmuSingle", "Profiling single instruction REM execution.");
248 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
249 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%u/EM/REMSync", "Profiling REM context syncing.");
250 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%u/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
251 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%u/EM/RAWExec", "Profiling Raw Mode execution.");
252 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%u/EM/RAWTail", "Profiling Raw Mode tail overhead.");
253#endif /* VBOX_WITH_STATISTICS */
254
255 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
256 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
257 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
258 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
259 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%u/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
260
261 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
262
263 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
264 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
265 AssertRC(rc);
266
267 /* History record statistics */
268 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
269 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
270 AssertRC(rc);
271
272 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
273 {
274 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
275 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
276 AssertRC(rc);
277 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
278 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
279 AssertRC(rc);
280 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
281 "Number of replacments at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", idCpu, iStep);
282 AssertRC(rc);
283 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
284 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
285 AssertRC(rc);
286 }
287
288 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
289 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
290 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
291 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
292 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
293 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
294 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
295 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
296 }
297
298 emR3InitDbg(pVM);
299 return VINF_SUCCESS;
300}
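/* Illustrative sketch only (kept out of the build): one way a hypothetical
 * CFGM constructor could pre-seed the /EM keys that EMR3Init() queries above.
 * CFGMR3InsertNode/CFGMR3InsertInteger are the usual CFGM helpers; booleans
 * are stored as integers.  Key names and limits are taken from the code above. */
#if 0
static int emDemoConfigureEm(PVM pVM)
{
    PCFGMNODE pCfgEM;
    int rc = CFGMR3InsertNode(CFGMR3GetRoot(pVM), "EM", &pCfgEM);
    AssertRCReturn(rc, rc);
    rc = CFGMR3InsertInteger(pCfgEM, "IemExecutesAll", 1);                /* run everything in IEM */
    AssertRCReturn(rc, rc);
    rc = CFGMR3InsertInteger(pCfgEM, "HistoryExecMaxInstructions", 4096); /* must be >= 16, see the range check above */
    return rc;
}
#endif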
301
302
303/**
304 * Called when a VM initialization stage is completed.
305 *
306 * @returns VBox status code.
307 * @param pVM The cross context VM structure.
308 * @param enmWhat The initialization state that was completed.
309 */
310VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
311{
312 if (enmWhat == VMINITCOMPLETED_RING0)
313 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
314 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
315 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
316 return VINF_SUCCESS;
317}
318
319
320/**
321 * Applies relocations to data and code managed by this
322 * component. This function will be called at init and
323 * whenever the VMM needs to relocate itself inside the GC.
324 *
325 * @param pVM The cross context VM structure.
326 */
327VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
328{
329 LogFlow(("EMR3Relocate\n"));
330 RT_NOREF(pVM);
331}
332
333
334/**
335 * Reset the EM state for a CPU.
336 *
337 * Called by EMR3Reset and hot plugging.
338 *
339 * @param pVCpu The cross context virtual CPU structure.
340 */
341VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
342{
343 /* Reset scheduling state. */
344 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
345
346 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
347 out of the HALTED state here so that enmPrevState doesn't end up as
348 HALTED when EMR3ExecuteVM returns. */
349 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
350 {
351 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
352 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
353 }
354}
355
356
357/**
358 * Reset notification.
359 *
360 * @param pVM The cross context VM structure.
361 */
362VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
363{
364 Log(("EMR3Reset: \n"));
365 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
366 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
367}
368
369
370/**
371 * Terminates the EM.
372 *
373 * Termination means cleaning up and freeing all resources;
374 * the VM itself is at this point powered off or suspended.
375 *
376 * @returns VBox status code.
377 * @param pVM The cross context VM structure.
378 */
379VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
380{
381 RT_NOREF(pVM);
382 return VINF_SUCCESS;
383}
384
385
386/**
387 * Execute state save operation.
388 *
389 * @returns VBox status code.
390 * @param pVM The cross context VM structure.
391 * @param pSSM SSM operation handle.
392 */
393static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
394{
395 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
396 {
397 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
398
399 SSMR3PutBool(pSSM, false /*fForceRAW*/);
400
401 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
402 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
403 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
404
405 /* Save mwait state. */
406 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
407 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
408 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
409 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
410 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
411 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
412 AssertRCReturn(rc, rc);
413 }
414 return VINF_SUCCESS;
415}
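/* For reference, the per-VCpu record written above is, in this order:
 *     bool     fForceRAW              - always written as false, kept for saved-state compatibility
 *     uint32_t enmPrevState           - the EM state before suspending
 *     uint32_t MWait.fWait
 *     RTGCPTR  MWait.uMWaitRAX, MWait.uMWaitRCX
 *     RTGCPTR  MWait.uMonitorRAX, MWait.uMonitorRCX, MWait.uMonitorRDX
 * emR3Load() below consumes the same layout, skipping fields that older
 * saved-state versions did not have. */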
416
417
418/**
419 * Execute state load operation.
420 *
421 * @returns VBox status code.
422 * @param pVM The cross context VM structure.
423 * @param pSSM SSM operation handle.
424 * @param uVersion Data layout version.
425 * @param uPass The data pass.
426 */
427static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
428{
429 /*
430 * Validate version.
431 */
432 if ( uVersion > EM_SAVED_STATE_VERSION
433 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
434 {
435 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
436 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
437 }
438 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
439
440 /*
441 * Load the saved state.
442 */
443 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
444 {
445 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
446
447 bool fForceRAWIgnored;
448 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
449 AssertRCReturn(rc, rc);
450
451 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
452 {
453 SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
454 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
455
456 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
457 }
458 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
459 {
460 /* Load mwait state. */
461 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
462 AssertRCReturn(rc, rc);
463 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
464 AssertRCReturn(rc, rc);
465 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
466 AssertRCReturn(rc, rc);
467 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
468 AssertRCReturn(rc, rc);
469 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
470 AssertRCReturn(rc, rc);
471 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
472 AssertRCReturn(rc, rc);
473 }
474 }
475 return VINF_SUCCESS;
476}
477
478
479/**
480 * Argument packet for emR3SetExecutionPolicy.
481 */
482struct EMR3SETEXECPOLICYARGS
483{
484 EMEXECPOLICY enmPolicy;
485 bool fEnforce;
486};
487
488
489/**
490 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
491 */
492static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
493{
494 /*
495 * Only the first CPU changes the variables.
496 */
497 if (pVCpu->idCpu == 0)
498 {
499 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
500 switch (pArgs->enmPolicy)
501 {
502 case EMEXECPOLICY_RECOMPILE_RING0:
503 case EMEXECPOLICY_RECOMPILE_RING3:
504 break;
505 case EMEXECPOLICY_IEM_ALL:
506 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
507
508 /* For making '.alliem 1' useful during debugging, transition the
509 EMSTATE_DEBUG_GUEST_XXX to EMSTATE_DEBUG_GUEST_IEM. */
510 for (VMCPUID i = 0; i < pVM->cCpus; i++)
511 {
512 PVMCPU pVCpuX = pVM->apCpusR3[i];
513 switch (pVCpuX->em.s.enmState)
514 {
515 case EMSTATE_DEBUG_GUEST_RAW:
516 case EMSTATE_DEBUG_GUEST_HM:
517 case EMSTATE_DEBUG_GUEST_NEM:
518 case EMSTATE_DEBUG_GUEST_REM:
519 Log(("EM: idCpu=%u: %s -> EMSTATE_DEBUG_GUEST_IEM\n", i, emR3GetStateName(pVCpuX->em.s.enmState) ));
520 pVCpuX->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
521 break;
522 case EMSTATE_DEBUG_GUEST_IEM:
523 default:
524 break;
525 }
526 }
527 break;
528 default:
529 AssertFailedReturn(VERR_INVALID_PARAMETER);
530 }
531 Log(("EM: Set execution policy (fIemExecutesAll=%RTbool)\n", pVM->em.s.fIemExecutesAll));
532 }
533
534 /*
535 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
536 */
537 return pVCpu->em.s.enmState == EMSTATE_RAW
538 || pVCpu->em.s.enmState == EMSTATE_HM
539 || pVCpu->em.s.enmState == EMSTATE_NEM
540 || pVCpu->em.s.enmState == EMSTATE_IEM
541 || pVCpu->em.s.enmState == EMSTATE_REM
542 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
543 ? VINF_EM_RESCHEDULE
544 : VINF_SUCCESS;
545}
546
547
548/**
549 * Changes an execution scheduling policy parameter.
550 *
551 * This is used to enable or disable raw-mode / hardware-virtualization
552 * execution of user and supervisor code.
553 *
554 * @returns VINF_SUCCESS on success.
555 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
556 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
557 *
558 * @param pUVM The user mode VM handle.
559 * @param enmPolicy The scheduling policy to change.
560 * @param fEnforce Whether to enforce the policy or not.
561 */
562VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
563{
564 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
565 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
566 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
567
568 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
569 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
570}
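/* Illustrative usage sketch (not part of this file): force all guest code
 * through IEM, which is essentially what the '.alliem 1' debugger command
 * mentioned in emR3SetExecutionPolicy() does.  Assumes a valid user mode VM
 * handle pUVM in the caller. */
#if 0
    int rcPolicy = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true /*fEnforce*/);
    if (rcPolicy == VINF_EM_RESCHEDULE)
        rcPolicy = VINF_SUCCESS; /* the EMTs will reschedule themselves */
    AssertLogRelRC(rcPolicy);
#endif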
571
572
573/**
574 * Queries an execution scheduling policy parameter.
575 *
576 * @returns VBox status code
577 * @param pUVM The user mode VM handle.
578 * @param enmPolicy The scheduling policy to query.
579 * @param pfEnforced Where to return the current value.
580 */
581VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
582{
583 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
584 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
585 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
586 PVM pVM = pUVM->pVM;
587 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
588
589 /* No need to bother EMTs with a query. */
590 switch (enmPolicy)
591 {
592 case EMEXECPOLICY_RECOMPILE_RING0:
593 case EMEXECPOLICY_RECOMPILE_RING3:
594 *pfEnforced = false;
595 break;
596 case EMEXECPOLICY_IEM_ALL:
597 *pfEnforced = pVM->em.s.fIemExecutesAll;
598 break;
599 default:
600 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
601 }
602
603 return VINF_SUCCESS;
604}
605
606
607/**
608 * Queries the main execution engine of the VM.
609 *
610 * @returns VBox status code
611 * @param pUVM The user mode VM handle.
612 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
613 */
614VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
615{
616 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
617 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
618
619 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
620 PVM pVM = pUVM->pVM;
621 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
622
623 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
624 return VINF_SUCCESS;
625}
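/* Illustrative usage sketch (not part of this file): a client telling the HM
 * and NEM engines apart.  VM_EXEC_ENGINE_HW_VIRT and VM_EXEC_ENGINE_NATIVE_API
 * are assumed here alongside the VM_EXEC_ENGINE_NOT_SET value used above. */
#if 0
    uint8_t bEngine = VM_EXEC_ENGINE_NOT_SET;
    int rcEngine = EMR3QueryMainExecutionEngine(pUVM, &bEngine);
    if (RT_SUCCESS(rcEngine))
        LogRel(("Main execution engine: %s\n",
                  bEngine == VM_EXEC_ENGINE_HW_VIRT    ? "HM (VT-x/AMD-V)"
                : bEngine == VM_EXEC_ENGINE_NATIVE_API ? "NEM (native API)" : "not set"));
#endif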
626
627
628/**
629 * Raise a fatal error.
630 *
631 * Safely terminate the VM with full state report and stuff. This function
632 * will naturally never return.
633 *
634 * @param pVCpu The cross context virtual CPU structure.
635 * @param rc VBox status code.
636 */
637VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
638{
639 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
640 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
641}
642
643
644#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
645/**
646 * Gets the EM state name.
647 *
648 * @returns Pointer to a read-only state name.
649 * @param enmState The state.
650 */
651static const char *emR3GetStateName(EMSTATE enmState)
652{
653 switch (enmState)
654 {
655 case EMSTATE_NONE: return "EMSTATE_NONE";
656 case EMSTATE_RAW: return "EMSTATE_RAW";
657 case EMSTATE_HM: return "EMSTATE_HM";
658 case EMSTATE_IEM: return "EMSTATE_IEM";
659 case EMSTATE_REM: return "EMSTATE_REM";
660 case EMSTATE_HALTED: return "EMSTATE_HALTED";
661 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
662 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
663 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
664 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
665 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
666 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
667 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
668 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
669 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
670 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
671 case EMSTATE_NEM: return "EMSTATE_NEM";
672 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
673 default: return "Unknown!";
674 }
675}
676#endif /* LOG_ENABLED || VBOX_STRICT */
677
678
679/**
680 * Handle pending ring-3 I/O port write.
681 *
682 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
683 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
684 *
685 * @returns Strict VBox status code.
686 * @param pVM The cross context VM structure.
687 * @param pVCpu The cross context virtual CPU structure.
688 */
689VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
690{
691 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
692
693 /* Get and clear the pending data. */
694 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
695 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
696 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
697 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
698 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
699
700 /* Assert sanity. */
701 switch (cbValue)
702 {
703 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
704 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
705 case 4: break;
706 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
707 }
708 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
709
710 /* Do the work.*/
711 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
712 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
713 if (IOM_SUCCESS(rcStrict))
714 {
715 pVCpu->cpum.GstCtx.rip += cbInstr;
716 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
717 }
718 return rcStrict;
719}
720
721
722/**
723 * Handle pending ring-3 I/O port read.
724 *
725 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
726 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
727 *
728 * @returns Strict VBox status code.
729 * @param pVM The cross context VM structure.
730 * @param pVCpu The cross context virtual CPU structure.
731 */
732VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
733{
734 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
735
736 /* Get and clear the pending data. */
737 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
738 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
739 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
740 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
741
742 /* Assert sanity. */
743 switch (cbValue)
744 {
745 case 1: break;
746 case 2: break;
747 case 4: break;
748 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
749 }
750 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
751 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
752
753 /* Do the work.*/
754 uint32_t uValue = 0;
755 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
756 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
757 if (IOM_SUCCESS(rcStrict))
758 {
759 if (cbValue == 4)
760 pVCpu->cpum.GstCtx.rax = uValue;
761 else if (cbValue == 2)
762 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
763 else
764 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
765 pVCpu->cpum.GstCtx.rip += cbInstr;
766 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
767 }
768 return rcStrict;
769}
770
771
772/**
773 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
774 * Worker for emR3ExecuteSplitLockInstruction}
775 */
776static DECLCALLBACK(VBOXSTRICTRC) emR3ExecuteSplitLockInstructionRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
777{
778 /* Only execute on the specified EMT. */
779 if (pVCpu == (PVMCPU)pvUser)
780 {
781 LogFunc(("\n"));
782 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
783 LogFunc(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
784 if (rcStrict == VINF_IEM_RAISED_XCPT)
785 rcStrict = VINF_SUCCESS;
786 return rcStrict;
787 }
788 RT_NOREF(pVM);
789 return VINF_SUCCESS;
790}
791
792
793/**
794 * Handle an instruction causing a split cacheline lock access in SMP VMs.
795 *
796 * Generally we only get here if the host has split-lock detection enabled and
797 * this caused an \#AC because of something the guest did. If we interpret the
798 * instruction as-is, we'll likely just repeat the split-lock access and
799 * possibly be killed, get a SIGBUS, or trigger a warning followed by extra MSR
800 * changes on context switching (costs a tiny bit). Assuming these \#ACs are
801 * rare to non-existing, we'll do a rendezvous of all EMTs and tell IEM to
802 * disregard the lock prefix when emulating the instruction.
803 *
804 * Yes, we could probably modify the MSR (or MSRs) controlling the detection
805 * feature when entering guest context, but the support for the feature isn't a
806 * 100% given and we'll need the debug-only supdrvOSMsrProberRead and
807 * supdrvOSMsrProberWrite functionality from SUPDrv.cpp to safely detect it.
808 * Thus the approach is to just deal with the spurious \#ACs first and maybe add
809 * proper detection to SUPDrv later if we find it necessary.
810 *
811 * @see @bugref{10052}
812 *
813 * @returns Strict VBox status code.
814 * @param pVM The cross context VM structure.
815 * @param pVCpu The cross context virtual CPU structure.
816 */
817VBOXSTRICTRC emR3ExecuteSplitLockInstruction(PVM pVM, PVMCPU pVCpu)
818{
819 LogFunc(("\n"));
820 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
821}
822
823
824/**
825 * Debug loop.
826 *
827 * @returns VBox status code for EM.
828 * @param pVM The cross context VM structure.
829 * @param pVCpu The cross context virtual CPU structure.
830 * @param rc Current EM VBox status code.
831 */
832static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
833{
834 for (;;)
835 {
836 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
837 const VBOXSTRICTRC rcLast = rc;
838
839 /*
840 * Debug related RC.
841 */
842 switch (VBOXSTRICTRC_VAL(rc))
843 {
844 /*
845 * Single step an instruction.
846 */
847 case VINF_EM_DBG_STEP:
848 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
849 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
850 AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
851 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
852 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
853 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
854 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
855#ifdef VBOX_WITH_REM /** @todo fix me? */
856 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
857 rc = emR3RemStep(pVM, pVCpu);
858#endif
859 else
860 {
861 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
862 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
863 rc = VINF_EM_DBG_STEPPED;
864 }
865 break;
866
867 /*
868 * Simple events: stepped, breakpoint, stop/assertion.
869 */
870 case VINF_EM_DBG_STEPPED:
871 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
872 break;
873
874 case VINF_EM_DBG_BREAKPOINT:
875 rc = DBGFR3BpHit(pVM, pVCpu);
876 break;
877
878 case VINF_EM_DBG_STOP:
879 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
880 break;
881
882 case VINF_EM_DBG_EVENT:
883 rc = DBGFR3EventHandlePending(pVM, pVCpu);
884 break;
885
886 case VINF_EM_DBG_HYPER_STEPPED:
887 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
888 break;
889
890 case VINF_EM_DBG_HYPER_BREAKPOINT:
891 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
892 break;
893
894 case VINF_EM_DBG_HYPER_ASSERTION:
895 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
896 RTLogFlush(NULL);
897 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
898 break;
899
900 /*
901 * Guru meditation.
902 */
903 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
904 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
905 break;
906 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
907 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
908 break;
909 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
910 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
911 break;
912
913 default: /** @todo don't use default for guru, but make special errors code! */
914 {
915 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
916 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
917 break;
918 }
919 }
920
921 /*
922 * Process the result.
923 */
924 switch (VBOXSTRICTRC_VAL(rc))
925 {
926 /*
927 * Continue the debugging loop.
928 */
929 case VINF_EM_DBG_STEP:
930 case VINF_EM_DBG_STOP:
931 case VINF_EM_DBG_EVENT:
932 case VINF_EM_DBG_STEPPED:
933 case VINF_EM_DBG_BREAKPOINT:
934 case VINF_EM_DBG_HYPER_STEPPED:
935 case VINF_EM_DBG_HYPER_BREAKPOINT:
936 case VINF_EM_DBG_HYPER_ASSERTION:
937 break;
938
939 /*
940 * Resuming execution (in some form) has to be done here if we got
941 * a hypervisor debug event.
942 */
943 case VINF_SUCCESS:
944 case VINF_EM_RESUME:
945 case VINF_EM_SUSPEND:
946 case VINF_EM_RESCHEDULE:
947 case VINF_EM_RESCHEDULE_RAW:
948 case VINF_EM_RESCHEDULE_REM:
949 case VINF_EM_HALT:
950 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
951 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
952 if (rc == VINF_SUCCESS)
953 rc = VINF_EM_RESCHEDULE;
954 return rc;
955
956 /*
957 * The debugger isn't attached.
958 * We'll simply turn the thing off since that's the easiest thing to do.
959 */
960 case VERR_DBGF_NOT_ATTACHED:
961 switch (VBOXSTRICTRC_VAL(rcLast))
962 {
963 case VINF_EM_DBG_HYPER_STEPPED:
964 case VINF_EM_DBG_HYPER_BREAKPOINT:
965 case VINF_EM_DBG_HYPER_ASSERTION:
966 case VERR_TRPM_PANIC:
967 case VERR_TRPM_DONT_PANIC:
968 case VERR_VMM_RING0_ASSERTION:
969 case VERR_VMM_HYPER_CR3_MISMATCH:
970 case VERR_VMM_RING3_CALL_DISABLED:
971 return rcLast;
972 }
973 return VINF_EM_OFF;
974
975 /*
976 * Status codes terminating the VM in one or another sense.
977 */
978 case VINF_EM_TERMINATE:
979 case VINF_EM_OFF:
980 case VINF_EM_RESET:
981 case VINF_EM_NO_MEMORY:
982 case VINF_EM_RAW_STALE_SELECTOR:
983 case VINF_EM_RAW_IRET_TRAP:
984 case VERR_TRPM_PANIC:
985 case VERR_TRPM_DONT_PANIC:
986 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
987 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
988 case VERR_VMM_RING0_ASSERTION:
989 case VERR_VMM_HYPER_CR3_MISMATCH:
990 case VERR_VMM_RING3_CALL_DISABLED:
991 case VERR_INTERNAL_ERROR:
992 case VERR_INTERNAL_ERROR_2:
993 case VERR_INTERNAL_ERROR_3:
994 case VERR_INTERNAL_ERROR_4:
995 case VERR_INTERNAL_ERROR_5:
996 case VERR_IPE_UNEXPECTED_STATUS:
997 case VERR_IPE_UNEXPECTED_INFO_STATUS:
998 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
999 return rc;
1000
1001 /*
1002 * The rest is unexpected, and will keep us here.
1003 */
1004 default:
1005 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1006 break;
1007 }
1008 } /* debug for ever */
1009}
1010
1011
1012#if defined(VBOX_WITH_REM) || defined(DEBUG)
1013/**
1014 * Steps recompiled code.
1015 *
1016 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1017 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1018 *
1019 * @param pVM The cross context VM structure.
1020 * @param pVCpu The cross context virtual CPU structure.
1021 */
1022static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1023{
1024 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1025
1026 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1027
1028 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1029 return rc;
1030}
1031#endif /* VBOX_WITH_REM || DEBUG */
1032
1033
1034/**
1035 * Executes recompiled code.
1036 *
1037 * This function contains the recompiler version of the inner
1038 * execution loop (the outer loop being in EMR3ExecuteVM()).
1039 *
1040 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1041 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1042 *
1043 * @param pVM The cross context VM structure.
1044 * @param pVCpu The cross context virtual CPU structure.
1045 * @param pfFFDone Where to store an indicator telling whether or not
1046 * FFs were done before returning.
1047 *
1048 */
1049static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1050{
1051#ifdef LOG_ENABLED
1052 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1053
1054 if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1055 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1056 else
1057 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1058#endif
1059 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1060
1061 /*
1062 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1063 * or the REM suggests raw-mode execution.
1064 */
1065 *pfFFDone = false;
1066 uint32_t cLoops = 0;
1067 int rc = VINF_SUCCESS;
1068 for (;;)
1069 {
1070 /*
1071 * Execute REM.
1072 */
1073 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1074 {
1075 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1076 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/));
1077 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1078 }
1079 else
1080 {
1081 /* Give up this time slice; virtual time continues */
1082 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1083 RTThreadSleep(5);
1084 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1085 rc = VINF_SUCCESS;
1086 }
1087
1088 /*
1089 * Deal with high priority post execution FFs before doing anything
1090 * else. Sync back the state and leave the lock to be on the safe side.
1091 */
1092 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1093 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1094 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1095
1096 /*
1097 * Process the returned status code.
1098 */
1099 if (rc != VINF_SUCCESS)
1100 {
1101 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1102 break;
1103 if (rc != VINF_REM_INTERRUPED_FF)
1104 {
1105 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1106 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1107 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1108 {
1109 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1110 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1111 {
1112 rc = VINF_EM_RESCHEDULE;
1113 break;
1114 }
1115 }
1116
1117 /*
1118 * Anything which is not known to us means an internal error
1119 * and the termination of the VM!
1120 */
1121 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1122 break;
1123 }
1124 }
1125
1126
1127 /*
1128 * Check and execute forced actions.
1129 *
1130 * Sync back the VM state and leave the lock before calling any of
1131 * these, you never know what's going to happen here.
1132 */
1133#ifdef VBOX_HIGH_RES_TIMERS_HACK
1134 TMTimerPollVoid(pVM, pVCpu);
1135#endif
1136 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1137 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1138 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1139 {
1140 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1141 rc = emR3ForcedActions(pVM, pVCpu, rc);
1142 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1143 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1144 if ( rc != VINF_SUCCESS
1145 && rc != VINF_EM_RESCHEDULE_REM)
1146 {
1147 *pfFFDone = true;
1148 break;
1149 }
1150 }
1151
1152 /*
1153 * Have to check if we can get back to fast execution mode every so often.
1154 */
1155 if (!(++cLoops & 7))
1156 {
1157 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
1158 if ( enmCheck != EMSTATE_REM
1159 && enmCheck != EMSTATE_IEM_THEN_REM)
1160 {
1161 LogFlow(("emR3RemExecute: emR3Reschedule -> %d -> VINF_EM_RESCHEDULE\n", enmCheck));
1162 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1163 return VINF_EM_RESCHEDULE;
1164 }
1165 Log2(("emR3RemExecute: emR3Reschedule -> %d\n", enmCheck));
1166 }
1167
1168 } /* The Inner Loop, recompiled execution mode version. */
1169
1170 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1171 return rc;
1172}
1173
1174
1175#ifdef DEBUG
1176
1177int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1178{
1179 EMSTATE enmOldState = pVCpu->em.s.enmState;
1180
1181 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1182
1183 Log(("Single step BEGIN:\n"));
1184 for (uint32_t i = 0; i < cIterations; i++)
1185 {
1186 DBGFR3PrgStep(pVCpu);
1187 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1188 emR3RemStep(pVM, pVCpu);
1189 if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
1190 break;
1191 }
1192 Log(("Single step END:\n"));
1193 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1194 pVCpu->em.s.enmState = enmOldState;
1195 return VINF_EM_RESCHEDULE;
1196}
1197
1198#endif /* DEBUG */
1199
1200
1201/**
1202 * Try to execute the problematic code in IEM first, then fall back on REM if there
1203 * is too much of it or if IEM doesn't implement something.
1204 *
1205 * @returns Strict VBox status code from IEMExecLots.
1206 * @param pVM The cross context VM structure.
1207 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1208 * @param pfFFDone Force flags done indicator.
1209 *
1210 * @thread EMT(pVCpu)
1211 */
1212static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1213{
1214 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1215 *pfFFDone = false;
1216
1217 /*
1218 * Execute in IEM for a while.
1219 */
1220 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1221 {
1222 uint32_t cInstructions;
1223 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 1024 - pVCpu->em.s.cIemThenRemInstructions /*cMaxInstructions*/,
1224 UINT32_MAX/2 /*cPollRate*/, &cInstructions);
1225 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1226 if (rcStrict != VINF_SUCCESS)
1227 {
1228 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1229 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1230 break;
1231
1232 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1233 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1234 return rcStrict;
1235 }
1236
1237 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1238 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1239 {
1240 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1241 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1242 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1243 pVCpu->em.s.enmState = enmNewState;
1244 return VINF_SUCCESS;
1245 }
1246
1247 /*
1248 * Check for pending actions.
1249 */
1250 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1251 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1252 return VINF_SUCCESS;
1253 }
1254
1255 /*
1256 * Switch to REM.
1257 */
1258 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1259 pVCpu->em.s.enmState = EMSTATE_REM;
1260 return VINF_SUCCESS;
1261}
1262
1263
1264/**
1265 * Decides whether to execute RAW, HWACC or REM.
1266 *
1267 * @returns new EM state
1268 * @param pVM The cross context VM structure.
1269 * @param pVCpu The cross context virtual CPU structure.
1270 */
1271EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1272{
1273 /*
1274 * We stay in the wait for SIPI state unless explicitly told otherwise.
1275 */
1276 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1277 return EMSTATE_WAIT_SIPI;
1278
1279 /*
1280 * Execute everything in IEM?
1281 */
1282 if (pVM->em.s.fIemExecutesAll)
1283 return EMSTATE_IEM;
1284
1285 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1286 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1287 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1288
1289 X86EFLAGS EFlags = pVCpu->cpum.GstCtx.eflags;
1290 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1291 {
1292 if (VM_IS_HM_ENABLED(pVM))
1293 {
1294 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1295 return EMSTATE_HM;
1296 }
1297 else if (NEMR3CanExecuteGuest(pVM, pVCpu))
1298 return EMSTATE_NEM;
1299
1300 /*
1301 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1302 * turns off monitoring features essential for raw mode!
1303 */
1304 return EMSTATE_IEM_THEN_REM;
1305 }
1306
1307 /*
1308 * Standard raw-mode:
1309 *
1310 * Here we only support 16 & 32-bit protected mode ring-3 code that has no I/O privileges,
1311 * or 32-bit protected mode ring-0 code.
1312 *
1313 * The tests are ordered by the likelihood of being true during normal execution.
1314 */
1315 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1316 {
1317 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1318 return EMSTATE_REM;
1319 }
1320
1321# ifndef VBOX_RAW_V86
1322 if (EFlags.u32 & X86_EFL_VM) {
1323 Log2(("raw mode refused: VM_MASK\n"));
1324 return EMSTATE_REM;
1325 }
1326# endif
1327
1328 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1329 uint32_t u32CR0 = pVCpu->cpum.GstCtx.cr0;
1330 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1331 {
1332 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1333 return EMSTATE_REM;
1334 }
1335
1336 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
1337 {
1338 uint32_t u32Dummy, u32Features;
1339
1340 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1341 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1342 return EMSTATE_REM;
1343 }
1344
1345 unsigned uSS = pVCpu->cpum.GstCtx.ss.Sel;
1346 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
1347 || (uSS & X86_SEL_RPL) == 3)
1348 {
1349 if (!(EFlags.u32 & X86_EFL_IF))
1350 {
1351 Log2(("raw mode refused: IF (RawR3)\n"));
1352 return EMSTATE_REM;
1353 }
1354
1355 if (!(u32CR0 & X86_CR0_WP))
1356 {
1357 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1358 return EMSTATE_REM;
1359 }
1360 }
1361 else
1362 {
1363 /* Only ring 0 supervisor code. */
1364 if ((uSS & X86_SEL_RPL) != 0)
1365 {
1366 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1367 return EMSTATE_REM;
1368 }
1369
1370 // Let's start with pure 32 bits ring 0 code first
1371 /** @todo What's pure 32-bit mode? flat? */
1372 if ( !(pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1373 || !(pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
1374 {
1375 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1376 return EMSTATE_REM;
1377 }
1378
1379 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1380 if (!(u32CR0 & X86_CR0_WP))
1381 {
1382 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1383 return EMSTATE_REM;
1384 }
1385
1386# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1387 if (!(EFlags.u32 & X86_EFL_IF))
1388 {
1389 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1390 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1391 return EMSTATE_REM;
1392 }
1393# endif
1394
1395# ifndef VBOX_WITH_RAW_RING1
1396 /** @todo still necessary??? */
1397 if (EFlags.Bits.u2IOPL != 0)
1398 {
1399 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1400 return EMSTATE_REM;
1401 }
1402# endif
1403 }
1404
1405 /*
1406 * Stale hidden selectors means raw-mode is unsafe (being very careful).
1407 */
1408 if (pVCpu->cpum.GstCtx.cs.fFlags & CPUMSELREG_FLAGS_STALE)
1409 {
1410 Log2(("raw mode refused: stale CS\n"));
1411 return EMSTATE_REM;
1412 }
1413 if (pVCpu->cpum.GstCtx.ss.fFlags & CPUMSELREG_FLAGS_STALE)
1414 {
1415 Log2(("raw mode refused: stale SS\n"));
1416 return EMSTATE_REM;
1417 }
1418 if (pVCpu->cpum.GstCtx.ds.fFlags & CPUMSELREG_FLAGS_STALE)
1419 {
1420 Log2(("raw mode refused: stale DS\n"));
1421 return EMSTATE_REM;
1422 }
1423 if (pVCpu->cpum.GstCtx.es.fFlags & CPUMSELREG_FLAGS_STALE)
1424 {
1425 Log2(("raw mode refused: stale ES\n"));
1426 return EMSTATE_REM;
1427 }
1428 if (pVCpu->cpum.GstCtx.fs.fFlags & CPUMSELREG_FLAGS_STALE)
1429 {
1430 Log2(("raw mode refused: stale FS\n"));
1431 return EMSTATE_REM;
1432 }
1433 if (pVCpu->cpum.GstCtx.gs.fFlags & CPUMSELREG_FLAGS_STALE)
1434 {
1435 Log2(("raw mode refused: stale GS\n"));
1436 return EMSTATE_REM;
1437 }
1438
1439# ifdef VBOX_WITH_SAFE_STR
1440 if (pVCpu->cpum.GstCtx.tr.Sel == 0)
1441 {
1442 Log(("Raw mode refused -> TR=0\n"));
1443 return EMSTATE_REM;
1444 }
1445# endif
1446
1447 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1448 return EMSTATE_RAW;
1449}
1450
1451
1452/**
1453 * Executes all high priority post execution force actions.
1454 *
1455 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1456 * fatal error status code.
1457 *
1458 * @param pVM The cross context VM structure.
1459 * @param pVCpu The cross context virtual CPU structure.
1460 * @param rc The current strict VBox status code rc.
1461 */
1462VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1463{
1464 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1465
1466 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1467 PDMCritSectBothFF(pVM, pVCpu);
1468
1469 /* Update CR3 (Nested Paging case for HM). */
1470 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1471 {
1472 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1473 int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1474 if (RT_FAILURE(rc2))
1475 return rc2;
1476 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1477 }
1478
1479 /* IEM has pending work (typically memory write after INS instruction). */
1480 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1481 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1482
1483 /* IOM has pending work (committing an I/O or MMIO write). */
1484 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1485 {
1486 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1487 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1488 { /* half likely, or at least it's a line shorter. */ }
1489 else if (rc == VINF_SUCCESS)
1490 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1491 else
1492 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1493 }
1494
1495 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1496 {
1497 if ( rc > VINF_EM_NO_MEMORY
1498 && rc <= VINF_EM_LAST)
1499 rc = VINF_EM_NO_MEMORY;
1500 }
1501
1502 return rc;
1503}
1504
1505
1506/**
1507 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1508 *
1509 * @returns VBox status code.
1510 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1511 * @param pVCpu The cross context virtual CPU structure.
1512 */
1513static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1514{
1515#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1516 /* Handle the "external interrupt" VM-exit intercept. */
1517 if (CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
1518 {
1519 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1520 AssertMsg( rcStrict != VINF_VMX_VMEXIT
1521 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1522 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1523 return VBOXSTRICTRC_TODO(rcStrict);
1524 }
1525#else
1526 RT_NOREF(pVCpu);
1527#endif
1528 return VINF_NO_CHANGE;
1529}
1530
1531
1532/**
1533 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1534 *
1535 * @returns VBox status code.
1536 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1537 * @param pVCpu The cross context virtual CPU structure.
1538 */
1539static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1540{
1541#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1542 /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
1543 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1544 {
1545 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1546 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1547 if (RT_SUCCESS(rcStrict))
1548 {
1549 AssertMsg( rcStrict != VINF_SVM_VMEXIT
1550 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1551 return VBOXSTRICTRC_VAL(rcStrict);
1552 }
1553
1554 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1555 return VINF_EM_TRIPLE_FAULT;
1556 }
1557#else
1558 NOREF(pVCpu);
1559#endif
1560 return VINF_NO_CHANGE;
1561}
1562
1563
1564/**
1565 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1566 *
1567 * @returns VBox status code.
1568 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1569 * @param pVCpu The cross context virtual CPU structure.
1570 */
1571static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1572{
1573#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1574 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1575 {
1576 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1577 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1578 if (RT_SUCCESS(rcStrict))
1579 {
1580 Assert(rcStrict != VINF_SVM_VMEXIT);
1581 return VBOXSTRICTRC_VAL(rcStrict);
1582 }
1583 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1584 return VINF_EM_TRIPLE_FAULT;
1585 }
1586#else
1587 NOREF(pVCpu);
1588#endif
1589 return VINF_NO_CHANGE;
1590}
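/* The three nested-guest intercept helpers above share one convention: they return
   VINF_NO_CHANGE when the relevant intercept is not active, telling the caller to
   inject the event the ordinary way.  A minimal sketch of that dispatch, loosely
   modelled on the external-interrupt handling in emR3ForcedActions() below (the
   fInVmxNonRootMode/fInSvmHwvirtMode flags and the fInjected variable are assumed
   locals, not shown here): */
#if 0 /* illustrative only */
    rc2 = fInVmxNonRootMode ? emR3VmxNstGstIntrIntercept(pVCpu)
        : fInSvmHwvirtMode  ? emR3SvmNstGstIntrIntercept(pVCpu)
        :                     VINF_NO_CHANGE;
    if (rc2 == VINF_NO_CHANGE) /* no VM-exit taken, deliver the event to the guest directly */
        rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
#endif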
1591
1592
1593/**
1594 * Executes all pending forced actions.
1595 *
1596 * Forced actions can cause execution delays and execution
1597 * rescheduling. The former we deal with using action priority, so
1598 * that for instance pending timers aren't scheduled and run until
1599 * right before execution. The rescheduling we deal with using
1600 * return codes. The same goes for VM termination, only in that case
1601 * we exit everything.
1602 *
1603 * @returns VBox status code of equal or greater importance/severity than rc.
1604 * The most important ones are: VINF_EM_RESCHEDULE,
1605 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1606 *
1607 * @param pVM The cross context VM structure.
1608 * @param pVCpu The cross context virtual CPU structure.
1609 * @param rc The current rc.
1610 *
1611 */
1612int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1613{
1614 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1615#ifdef VBOX_STRICT
1616 int rcIrq = VINF_SUCCESS;
1617#endif
1618 int rc2;
1619#define UPDATE_RC() \
1620 do { \
1621 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1622 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1623 break; \
1624 if (!rc || rc2 < rc) \
1625 rc = rc2; \
1626 } while (0)
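    /* Worked example of the UPDATE_RC() merging rule, assuming the usual VBox
       ordering where numerically smaller VINF_EM_* codes are more important:
         rc = VINF_EM_RESCHEDULE, rc2 = VINF_EM_SUSPEND -> rc becomes VINF_EM_SUSPEND;
         rc = VINF_SUCCESS,       rc2 = VINF_EM_RESET   -> rc becomes VINF_EM_RESET;
         rc < VINF_SUCCESS (an error)                   -> rc is left untouched. */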
1627 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1628
1629 /*
1630 * Post execution chunk first.
1631 */
1632 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1633 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1634 {
1635 /*
1636 * EMT Rendezvous (must be serviced before termination).
1637 */
1638 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1639 {
1640 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1641 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1642 UPDATE_RC();
1643 /** @todo HACK ALERT! The following test is to make sure EM+TM
1644 * thinks the VM is stopped/reset before the next VM state change
1645 * is made. We need a better solution for this, or at least make it
1646 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1647 * VINF_EM_SUSPEND). */
1648 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1649 {
1650 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1651 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1652 return rc;
1653 }
1654 }
1655
1656 /*
1657 * State change request (cleared by vmR3SetStateLocked).
1658 */
1659 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1660 {
1661 VMSTATE enmState = VMR3GetState(pVM);
1662 switch (enmState)
1663 {
1664 case VMSTATE_FATAL_ERROR:
1665 case VMSTATE_FATAL_ERROR_LS:
1666 case VMSTATE_GURU_MEDITATION:
1667 case VMSTATE_GURU_MEDITATION_LS:
1668 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1669 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1670 return VINF_EM_SUSPEND;
1671
1672 case VMSTATE_DESTROYING:
1673 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1674 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1675 return VINF_EM_TERMINATE;
1676
1677 default:
1678 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1679 }
1680 }
1681
1682 /*
1683 * Debugger Facility polling.
1684 */
1685 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1686 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1687 {
1688 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1689 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1690 /** @todo why that VINF_EM_DBG_EVENT here? Duplicate info, should be handled
1691 * somewhere before we get here, I would think. */
1692 if (rc == VINF_EM_DBG_EVENT) /* HACK! We should've handled pending debug event. */
1693 rc = rc2;
1694 else
1695 UPDATE_RC();
1696 }
1697
1698 /*
1699 * Postponed reset request.
1700 */
1701 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1702 {
1703 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1704 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1705 UPDATE_RC();
1706 }
1707
1708 /*
1709 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1710 */
1711 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1712 {
1713 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1714 UPDATE_RC();
1715 if (rc == VINF_EM_NO_MEMORY)
1716 return rc;
1717 }
1718
1719 /* check that we got them all */
1720 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1721 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1722 }
1723
1724 /*
1725 * Normal priority then.
1726 * (Executed in no particular order.)
1727 */
1728 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1729 {
1730 /*
1731 * PDM Queues are pending.
1732 */
1733 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1734 PDMR3QueueFlushAll(pVM);
1735
1736 /*
1737 * PDM DMA transfers are pending.
1738 */
1739 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1740 PDMR3DmaRun(pVM);
1741
1742 /*
1743 * EMT Rendezvous (make sure they are handled before the requests).
1744 */
1745 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1746 {
1747 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1748 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1749 UPDATE_RC();
1750 /** @todo HACK ALERT! The following test is to make sure EM+TM
1751 * thinks the VM is stopped/reset before the next VM state change
1752 * is made. We need a better solution for this, or at least make it
1753 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1754 * VINF_EM_SUSPEND). */
1755 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1756 {
1757 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1758 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1759 return rc;
1760 }
1761 }
1762
1763 /*
1764 * Requests from other threads.
1765 */
1766 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1767 {
1768 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1769 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1770 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1771 {
1772 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1773 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1774 return rc2;
1775 }
1776 UPDATE_RC();
1777 /** @todo HACK ALERT! The following test is to make sure EM+TM
1778 * thinks the VM is stopped/reset before the next VM state change
1779 * is made. We need a better solution for this, or at least make it
1780 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1781 * VINF_EM_SUSPEND). */
1782 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1783 {
1784 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1785 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1786 return rc;
1787 }
1788 }
1789
1790 /* check that we got them all */
1791 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS));
1792 }
1793
1794 /*
1795 * Normal priority then. (per-VCPU)
1796 * (Executed in no particular order.)
1797 */
1798 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1799 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1800 {
1801 /*
1802 * Requests from other threads.
1803 */
1804 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1805 {
1806 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1807 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1808 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1809 {
1810 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1811 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1812 return rc2;
1813 }
1814 UPDATE_RC();
1815 /** @todo HACK ALERT! The following test is to make sure EM+TM
1816 * thinks the VM is stopped/reset before the next VM state change
1817 * is made. We need a better solution for this, or at least make it
1818 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1819 * VINF_EM_SUSPEND). */
1820 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1821 {
1822 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1823 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1824 return rc;
1825 }
1826 }
1827
1828 /* check that we got them all */
1829 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1830 }
1831
1832 /*
1833 * High priority pre execution chunk last.
1834 * (Executed in ascending priority order.)
1835 */
1836 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1837 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1838 {
1839 /*
1840 * Timers before interrupts.
1841 */
1842 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1843 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1844 TMR3TimerQueuesDo(pVM);
1845
1846 /*
1847 * Pick up asynchronously posted interrupts into the APIC.
1848 */
1849 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1850 APICUpdatePendingInterrupts(pVCpu);
1851
1852 /*
1853 * The instruction following an emulated STI should *always* be executed!
1854 *
1855 * Note! We intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if
1856 * the eip is the same as the inhibited instr address. Before we
1857 * are able to execute this instruction in raw mode (iret to
1858 * guest code) an external interrupt might force a world switch
1859 * again. Possibly allowing a guest interrupt to be dispatched
1860 * in the process. This could break the guest. Sounds very
1861 * unlikely, but such timing-sensitive problems are not as rare as
1862 * you might think.
1863 */
1864 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1865 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1866 {
1867 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
1868 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1869 {
1870 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1871 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1872 }
1873 else
1874 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1875 }
1876
1877 /** @todo SMIs. If we implement SMIs, this is where they will have to be
1878 * delivered. */
1879
1880#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1881 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER))
1882 {
1883 /*
1884 * VMX Nested-guest APIC-write pending (can cause VM-exits).
1885 * Takes priority even over SMI and INIT signals.
1886 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
1887 */
1888 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
1889 {
1890 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
1891 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1892 UPDATE_RC();
1893 }
1894
1895 /*
1896 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
1897 * Takes priority over "Traps on the previous instruction".
1898 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
1899 */
1900 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
1901 {
1902 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
1903 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1904 UPDATE_RC();
1905 }
1906
1907 /*
1908 * VMX Nested-guest preemption timer VM-exit.
1909 * Takes priority over NMI-window VM-exits.
1910 */
1911 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
1912 {
1913 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
1914 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1915 UPDATE_RC();
1916 }
1917 }
1918#endif
1919
1920 /*
1921 * Guest event injection.
1922 */
1923 Assert(!(pVCpu->cpum.GstCtx.fExtrn & (CPUMCTX_EXTRN_INHIBIT_INT | CPUMCTX_EXTRN_INHIBIT_NMI)));
1924 bool fWakeupPending = false;
1925 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1926 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1927 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) /* Interrupt shadows block both NMIs and interrupts. */
1928 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
1929 {
1930 bool fInVmxNonRootMode;
1931 bool fInSvmHwvirtMode;
1932 bool const fInNestedGuest = CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx);
1933 if (fInNestedGuest)
1934 {
1935 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1936 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1937 }
1938 else
1939 {
1940 fInVmxNonRootMode = false;
1941 fInSvmHwvirtMode = false;
1942 }
1943
1944 bool fGif = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
1945 if (fGif)
1946 {
1947#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1948 /*
1949 * VMX NMI-window VM-exit.
1950 * Takes priority over non-maskable interrupts (NMIs).
1951 * Interrupt shadows block NMI-window VM-exits.
1952 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
1953 *
1954 * See Intel spec. 25.2 "Other Causes Of VM Exits".
1955 * See Intel spec. 26.7.6 "NMI-Window Exiting".
1956 */
1957 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1958 && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1959 {
1960 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
1961 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1962 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
1963 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1964 && rc2 != VINF_VMX_VMEXIT
1965 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1966 UPDATE_RC();
1967 }
1968 else
1969#endif
1970 /*
1971 * NMIs (take priority over external interrupts).
1972 */
1973 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1974 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1975 {
1976#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1977 if ( fInVmxNonRootMode
1978 && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
1979 {
1980 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
1981 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1982 UPDATE_RC();
1983 }
1984 else
1985#endif
1986#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1987 if ( fInSvmHwvirtMode
1988 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
1989 {
1990 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
1991 AssertMsg( rc2 != VINF_SVM_VMEXIT
1992 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1993 UPDATE_RC();
1994 }
1995 else
1996#endif
1997 {
1998 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
1999 if (rc2 == VINF_SUCCESS)
2000 {
2001 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
2002 fWakeupPending = true;
2003 if (pVM->em.s.fIemExecutesAll)
2004 rc2 = VINF_EM_RESCHEDULE;
2005 else
2006 {
2007 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
2008 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
2009 : VINF_EM_RESCHEDULE_REM;
2010 }
2011 }
2012 UPDATE_RC();
2013 }
2014 }
2015#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2016 /*
2017 * VMX Interrupt-window VM-exits.
2018 * Takes priority over external interrupts.
2019 */
2020 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
2021 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
2022 {
2023 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
2024 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
2025 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
2026 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
2027 && rc2 != VINF_VMX_VMEXIT
2028 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
2029 UPDATE_RC();
2030 }
2031#endif
2032#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2033 /** @todo NSTSVM: Handle this for SVM here too later not when an interrupt is
2034 * actually pending like we currently do. */
2035#endif
2036 /*
2037 * External interrupts.
2038 */
2039 else
2040 {
2041 /*
2042 * VMX: virtual interrupts take priority over physical interrupts.
2043 * SVM: physical interrupts take priority over virtual interrupts.
2044 */
2045 if ( fInVmxNonRootMode
2046 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
2047 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
2048 {
2049 /** @todo NSTVMX: virtual-interrupt delivery. */
2050 rc2 = VINF_SUCCESS;
2051 }
2052 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2053 && CPUMIsGuestPhysIntrEnabled(pVCpu))
2054 {
2055 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2056 if (fInVmxNonRootMode)
2057 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
2058 else if (fInSvmHwvirtMode)
2059 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
2060 else
2061 rc2 = VINF_NO_CHANGE;
2062
2063 if (rc2 == VINF_NO_CHANGE)
2064 {
2065 bool fInjected = false;
2066 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2067 /** @todo this really isn't nice, should properly handle this */
2068 /* Note! This can still cause a VM-exit (on Intel). */
2069 LogFlow(("Calling TRPMR3InjectEvent: %04x:%08RX64 efl=%#x\n",
2070 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
2071 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
2072 fWakeupPending = true;
2073 if ( pVM->em.s.fIemExecutesAll
2074 && ( rc2 == VINF_EM_RESCHEDULE_REM
2075 || rc2 == VINF_EM_RESCHEDULE_HM
2076 || rc2 == VINF_EM_RESCHEDULE_RAW))
2077 {
2078 rc2 = VINF_EM_RESCHEDULE;
2079 }
2080#ifdef VBOX_STRICT
2081 if (fInjected)
2082 rcIrq = rc2;
2083#endif
2084 }
2085 UPDATE_RC();
2086 }
2087 else if ( fInSvmHwvirtMode
2088 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
2089 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
2090 {
2091 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
2092 if (rc2 == VINF_NO_CHANGE)
2093 {
2094 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
2095 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
2096 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
2097 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
2098 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
2099 rc2 = VINF_EM_RESCHEDULE;
2100#ifdef VBOX_STRICT
2101 rcIrq = rc2;
2102#endif
2103 }
2104 UPDATE_RC();
2105 }
2106 }
2107 }
2108 }
2109
2110 /*
2111 * Allocate handy pages.
2112 */
2113 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2114 {
2115 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2116 UPDATE_RC();
2117 }
2118
2119 /*
2120 * Debugger Facility request.
2121 */
2122 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
2123 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
2124 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
2125 {
2126 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2127 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2128 UPDATE_RC();
2129 }
2130
2131 /*
2132 * EMT Rendezvous (must be serviced before termination).
2133 */
2134 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2135 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2136 {
2137 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2138 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2139 UPDATE_RC();
2140 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2141 * stopped/reset before the next VM state change is made. We need a better
2142 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2143 * && rc <= VINF_EM_SUSPEND). */
2144 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2145 {
2146 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2147 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2148 return rc;
2149 }
2150 }
2151
2152 /*
2153 * State change request (cleared by vmR3SetStateLocked).
2154 */
2155 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2156 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2157 {
2158 VMSTATE enmState = VMR3GetState(pVM);
2159 switch (enmState)
2160 {
2161 case VMSTATE_FATAL_ERROR:
2162 case VMSTATE_FATAL_ERROR_LS:
2163 case VMSTATE_GURU_MEDITATION:
2164 case VMSTATE_GURU_MEDITATION_LS:
2165 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2166 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2167 return VINF_EM_SUSPEND;
2168
2169 case VMSTATE_DESTROYING:
2170 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2171 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2172 return VINF_EM_TERMINATE;
2173
2174 default:
2175 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2176 }
2177 }
2178
2179 /*
2180 * Out of memory? Since most of our fellow high priority actions may cause us
2181 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2182 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2183 * than us since we can terminate without allocating more memory.
2184 */
2185 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2186 {
2187 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2188 UPDATE_RC();
2189 if (rc == VINF_EM_NO_MEMORY)
2190 return rc;
2191 }
2192
2193 /*
2194 * If the virtual sync clock is still stopped, make TM restart it.
2195 */
2196 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2197 TMR3VirtualSyncFF(pVM, pVCpu);
2198
2199#ifdef DEBUG
2200 /*
2201 * Debug, pause the VM.
2202 */
2203 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2204 {
2205 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2206 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2207 return VINF_EM_SUSPEND;
2208 }
2209#endif
2210
2211 /* check that we got them all */
2212 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2213 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2214 }
2215
2216#undef UPDATE_RC
2217 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2218 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2219 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2220 return rc;
2221}
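/* For reference, the processing order implemented above (derived from the code,
   each tier gated by its FF masks): 1) normal-priority post actions (EMT rendezvous,
   VM state check, DBGF, postponed reset, low memory), 2) normal-priority VM actions
   (PDM queues, PDM DMA, rendezvous, cross-thread requests), 3) normal-priority
   per-VCPU requests, 4) high-priority pre-execution actions (timers, APIC update,
   interrupt-shadow handling, nested-guest VM-exits, guest event injection, handy
   pages, DBGF, rendezvous, VM state, out-of-memory, virtual sync clock). */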
2222
2223
2224/**
2225 * Check if the preset execution time cap restricts guest execution scheduling.
2226 *
2227 * @returns true if execution is allowed, false otherwise.
2228 * @param pVM The cross context VM structure.
2229 * @param pVCpu The cross context virtual CPU structure.
2230 */
2231bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2232{
2233 uint64_t u64UserTime, u64KernelTime;
2234
2235 if ( pVM->uCpuExecutionCap != 100
2236 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2237 {
2238 uint64_t u64TimeNow = RTTimeMilliTS();
2239 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2240 {
2241 /* New time slice. */
2242 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2243 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2244 pVCpu->em.s.u64TimeSliceExec = 0;
2245 }
2246 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2247
2248 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2249 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2250 return false;
2251 }
2252 return true;
2253}
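/* Worked example of the execution cap arithmetic above, assuming EM_TIME_SLICE is
   100 ms (the actual value lives in EMInternal.h and is not shown here): with
   uCpuExecutionCap = 50, the EMT may accumulate at most 100 * 50 / 100 = 50 ms of
   combined kernel+user time per slice; once u64TimeSliceExec reaches that budget,
   emR3IsExecutionAllowed() returns false until a new slice starts. */
#if 0 /* illustrative only */
    uint64_t const cMsBudget = (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100; /* e.g. 50 ms */
    bool     const fAllowed  = pVCpu->em.s.u64TimeSliceExec < cMsBudget;
#endif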
2254
2255
2256/**
2257 * Execute VM.
2258 *
2259 * This function is the main loop of the VM. The emulation thread
2260 * calls this function when the VM has been successfully constructed
2261 * and we're ready for executing the VM.
2262 *
2263 * Returning from this function means that the VM is turned off or
2264 * suspended (state already saved) and deconstruction is next in line.
2265 *
2266 * All interaction from other threads is done using forced actions
2267 * and signalling of the wait object.
2268 *
2269 * @returns VBox status code; informational status codes may indicate failure.
2270 * @param pVM The cross context VM structure.
2271 * @param pVCpu The cross context virtual CPU structure.
2272 */
2273VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2274{
2275 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2276 pVM,
2277 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2278 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2279 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2280 VM_ASSERT_EMT(pVM);
2281 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2282 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2283 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2284 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2285
2286 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2287 if (rc == 0)
2288 {
2289 /*
2290 * Start the virtual time.
2291 */
2292 TMR3NotifyResume(pVM, pVCpu);
2293
2294 /*
2295 * The Outer Main Loop.
2296 */
2297 bool fFFDone = false;
2298
2299 /* Reschedule right away to start in the right state. */
2300 rc = VINF_SUCCESS;
2301
2302 /* If resuming after a pause or a state load, restore the previous
2303 state so we don't start executing code right away. Otherwise, just reschedule. */
2304 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2305 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2306 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2307 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2308 else
2309 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2310 pVCpu->em.s.cIemThenRemInstructions = 0;
2311 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2312
2313 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2314 for (;;)
2315 {
2316 /*
2317 * Before we can schedule anything (we're here because
2318 * scheduling is required) we must service any pending
2319 * forced actions to avoid any pending action causing
2320 * immediate rescheduling upon entering an inner loop
2321 *
2322 * Do forced actions.
2323 */
2324 if ( !fFFDone
2325 && RT_SUCCESS(rc)
2326 && rc != VINF_EM_TERMINATE
2327 && rc != VINF_EM_OFF
2328 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2329 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2330 {
2331 rc = emR3ForcedActions(pVM, pVCpu, rc);
2332 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2333 }
2334 else if (fFFDone)
2335 fFFDone = false;
2336
2337 /*
2338 * Now what to do?
2339 */
2340 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2341 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2342 switch (rc)
2343 {
2344 /*
2345 * Keep doing what we're currently doing.
2346 */
2347 case VINF_SUCCESS:
2348 break;
2349
2350 /*
2351 * Reschedule - to raw-mode execution.
2352 */
2353/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2354 case VINF_EM_RESCHEDULE_RAW:
2355 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2356 if (VM_IS_RAW_MODE_ENABLED(pVM))
2357 {
2358 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2359 pVCpu->em.s.enmState = EMSTATE_RAW;
2360 }
2361 else
2362 {
2363 AssertLogRelFailed();
2364 pVCpu->em.s.enmState = EMSTATE_NONE;
2365 }
2366 break;
2367
2368 /*
2369 * Reschedule - to HM or NEM.
2370 */
2371 case VINF_EM_RESCHEDULE_HM:
2372 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2373 if (VM_IS_HM_ENABLED(pVM))
2374 {
2375 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2376 pVCpu->em.s.enmState = EMSTATE_HM;
2377 }
2378 else if (VM_IS_NEM_ENABLED(pVM))
2379 {
2380 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2381 pVCpu->em.s.enmState = EMSTATE_NEM;
2382 }
2383 else
2384 {
2385 AssertLogRelFailed();
2386 pVCpu->em.s.enmState = EMSTATE_NONE;
2387 }
2388 break;
2389
2390 /*
2391 * Reschedule - to recompiled execution.
2392 */
2393 case VINF_EM_RESCHEDULE_REM:
2394 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2395 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2396 {
2397 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2398 enmOldState, EMSTATE_IEM_THEN_REM));
2399 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2400 {
2401 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2402 pVCpu->em.s.cIemThenRemInstructions = 0;
2403 }
2404 }
2405 else
2406 {
2407 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2408 pVCpu->em.s.enmState = EMSTATE_REM;
2409 }
2410 break;
2411
2412 /*
2413 * Resume.
2414 */
2415 case VINF_EM_RESUME:
2416 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2417 /* Don't reschedule in the halted or wait for SIPI case. */
2418 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2419 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2420 {
2421 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2422 break;
2423 }
2424 /* fall through and get scheduled. */
2425 RT_FALL_THRU();
2426
2427 /*
2428 * Reschedule.
2429 */
2430 case VINF_EM_RESCHEDULE:
2431 {
2432 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2433 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2434 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2435 pVCpu->em.s.cIemThenRemInstructions = 0;
2436 pVCpu->em.s.enmState = enmState;
2437 break;
2438 }
2439
2440 /*
2441 * Halted.
2442 */
2443 case VINF_EM_HALT:
2444 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2445 pVCpu->em.s.enmState = EMSTATE_HALTED;
2446 break;
2447
2448 /*
2449 * Switch to the wait for SIPI state (application processor only)
2450 */
2451 case VINF_EM_WAIT_SIPI:
2452 Assert(pVCpu->idCpu != 0);
2453 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2454 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2455 break;
2456
2457
2458 /*
2459 * Suspend.
2460 */
2461 case VINF_EM_SUSPEND:
2462 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2463 Assert(enmOldState != EMSTATE_SUSPENDED);
2464 pVCpu->em.s.enmPrevState = enmOldState;
2465 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2466 break;
2467
2468 /*
2469 * Reset.
2470 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2471 */
2472 case VINF_EM_RESET:
2473 {
2474 if (pVCpu->idCpu == 0)
2475 {
2476 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2477 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2478 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2479 pVCpu->em.s.cIemThenRemInstructions = 0;
2480 pVCpu->em.s.enmState = enmState;
2481 }
2482 else
2483 {
2484 /* All other VCPUs go into the wait for SIPI state. */
2485 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2486 }
2487 break;
2488 }
2489
2490 /*
2491 * Power Off.
2492 */
2493 case VINF_EM_OFF:
2494 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2495 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2496 TMR3NotifySuspend(pVM, pVCpu);
2497 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2498 return rc;
2499
2500 /*
2501 * Terminate the VM.
2502 */
2503 case VINF_EM_TERMINATE:
2504 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2505 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2506 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2507 TMR3NotifySuspend(pVM, pVCpu);
2508 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2509 return rc;
2510
2511
2512 /*
2513 * Out of memory, suspend the VM and stuff.
2514 */
2515 case VINF_EM_NO_MEMORY:
2516 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2517 Assert(enmOldState != EMSTATE_SUSPENDED);
2518 pVCpu->em.s.enmPrevState = enmOldState;
2519 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2520 TMR3NotifySuspend(pVM, pVCpu);
2521 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2522
2523 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2524 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2525 if (rc != VINF_EM_SUSPEND)
2526 {
2527 if (RT_SUCCESS_NP(rc))
2528 {
2529 AssertLogRelMsgFailed(("%Rrc\n", rc));
2530 rc = VERR_EM_INTERNAL_ERROR;
2531 }
2532 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2533 }
2534 return rc;
2535
2536 /*
2537 * Guest debug events.
2538 */
2539 case VINF_EM_DBG_STEPPED:
2540 case VINF_EM_DBG_STOP:
2541 case VINF_EM_DBG_EVENT:
2542 case VINF_EM_DBG_BREAKPOINT:
2543 case VINF_EM_DBG_STEP:
2544 if (enmOldState == EMSTATE_RAW)
2545 {
2546 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2547 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2548 }
2549 else if (enmOldState == EMSTATE_HM)
2550 {
2551 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2552 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2553 }
2554 else if (enmOldState == EMSTATE_NEM)
2555 {
2556 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2557 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2558 }
2559 else if (enmOldState == EMSTATE_REM)
2560 {
2561 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2562 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2563 }
2564 else
2565 {
2566 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2567 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2568 }
2569 break;
2570
2571 /*
2572 * Hypervisor debug events.
2573 */
2574 case VINF_EM_DBG_HYPER_STEPPED:
2575 case VINF_EM_DBG_HYPER_BREAKPOINT:
2576 case VINF_EM_DBG_HYPER_ASSERTION:
2577 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2578 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2579 break;
2580
2581 /*
2582 * Triple fault.
2583 */
2584 case VINF_EM_TRIPLE_FAULT:
2585 if (!pVM->em.s.fGuruOnTripleFault)
2586 {
2587 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2588 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2589 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2590 continue;
2591 }
2592 /* Else fall through and trigger a guru. */
2593 RT_FALL_THRU();
2594
2595 case VERR_VMM_RING0_ASSERTION:
2596 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2597 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2598 break;
2599
2600 /*
2601 * Any error code showing up here other than the ones we
2602 * know and process above is considered to be FATAL.
2603 *
2604 * Unknown warnings and informational status codes are also
2605 * included in this.
2606 */
2607 default:
2608 if (RT_SUCCESS_NP(rc))
2609 {
2610 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2611 rc = VERR_EM_INTERNAL_ERROR;
2612 }
2613 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2614 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2615 break;
2616 }
2617
2618 /*
2619 * Act on state transition.
2620 */
2621 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2622 if (enmOldState != enmNewState)
2623 {
2624 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2625
2626 /* Clear MWait flags and the unhalt FF. */
2627 if ( enmOldState == EMSTATE_HALTED
2628 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2629 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2630 && ( enmNewState == EMSTATE_RAW
2631 || enmNewState == EMSTATE_HM
2632 || enmNewState == EMSTATE_NEM
2633 || enmNewState == EMSTATE_REM
2634 || enmNewState == EMSTATE_IEM_THEN_REM
2635 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2636 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2637 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2638 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2639 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2640 {
2641 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2642 {
2643 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2644 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2645 }
2646 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2647 {
2648 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2649 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2650 }
2651 }
2652 }
2653 else
2654 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2655
2656 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2657 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2658
2659 /*
2660 * Act on the new state.
2661 */
2662 switch (enmNewState)
2663 {
2664 /*
2665 * Execute raw.
2666 */
2667 case EMSTATE_RAW:
2668 AssertLogRelMsgFailed(("%Rrc\n", rc));
2669 rc = VERR_EM_INTERNAL_ERROR;
2670 break;
2671
2672 /*
2673 * Execute hardware accelerated raw.
2674 */
2675 case EMSTATE_HM:
2676 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2677 break;
2678
2679 /*
2680 * Execute using the native execution manager (NEM).
2681 */
2682 case EMSTATE_NEM:
2683 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2684 break;
2685
2686 /*
2687 * Execute recompiled.
2688 */
2689 case EMSTATE_REM:
2690 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2691 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2692 break;
2693
2694 /*
2695 * Execute in the interpreter.
2696 */
2697 case EMSTATE_IEM:
2698 {
2699 uint32_t cInstructions = 0;
2700#if 0 /* For testing purposes. */
2701 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2702 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2703 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2704 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2705 rc = VINF_SUCCESS;
2706 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2707#endif
2708 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2709 if (pVM->em.s.fIemExecutesAll)
2710 {
2711 Assert(rc != VINF_EM_RESCHEDULE_REM);
2712 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2713 Assert(rc != VINF_EM_RESCHEDULE_HM);
2714#ifdef VBOX_HIGH_RES_TIMERS_HACK
2715 if (cInstructions < 2048)
2716 TMTimerPollVoid(pVM, pVCpu);
2717#endif
2718 }
2719 fFFDone = false;
2720 break;
2721 }
2722
2723 /*
2724 * Execute in IEM, hoping we can quickly switch back to HM
2725 * or RAW execution. If our hopes fail, we go to REM.
2726 */
2727 case EMSTATE_IEM_THEN_REM:
2728 {
2729 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2730 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2731 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2732 break;
2733 }
2734
2735 /*
2736 * Application processor execution halted until SIPI.
2737 */
2738 case EMSTATE_WAIT_SIPI:
2739 /* no break */
2740 /*
2741 * hlt - execution halted until interrupt.
2742 */
2743 case EMSTATE_HALTED:
2744 {
2745 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2746 /* If HM (or someone else) stores a pending interrupt in
2747 TRPM, it must be dispatched ASAP without any halting.
2748 Anything pending in TRPM has been accepted and the CPU
2749 should already be in the right state to receive it. */
2750 if (TRPMHasTrap(pVCpu))
2751 rc = VINF_EM_RESCHEDULE;
2752 /* MWAIT has a special extension where it's woken up when
2753 an interrupt is pending even when IF=0. */
2754 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2755 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2756 {
2757 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2758 if (rc == VINF_SUCCESS)
2759 {
2760 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2761 APICUpdatePendingInterrupts(pVCpu);
2762
2763 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2764 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2765 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2766 {
2767 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2768 rc = VINF_EM_RESCHEDULE;
2769 }
2770 }
2771 }
2772 else
2773 {
2774 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2775 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2776 check VMCPU_FF_UPDATE_APIC here. */
2777 if ( rc == VINF_SUCCESS
2778 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2779 {
2780 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2781 rc = VINF_EM_RESCHEDULE;
2782 }
2783 }
2784
2785 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2786 break;
2787 }
2788
2789 /*
2790 * Suspended - return to VM.cpp.
2791 */
2792 case EMSTATE_SUSPENDED:
2793 TMR3NotifySuspend(pVM, pVCpu);
2794 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2795 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2796 return VINF_EM_SUSPEND;
2797
2798 /*
2799 * Debugging in the guest.
2800 */
2801 case EMSTATE_DEBUG_GUEST_RAW:
2802 case EMSTATE_DEBUG_GUEST_HM:
2803 case EMSTATE_DEBUG_GUEST_NEM:
2804 case EMSTATE_DEBUG_GUEST_IEM:
2805 case EMSTATE_DEBUG_GUEST_REM:
2806 TMR3NotifySuspend(pVM, pVCpu);
2807 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2808 TMR3NotifyResume(pVM, pVCpu);
2809 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2810 break;
2811
2812 /*
2813 * Debugging in the hypervisor.
2814 */
2815 case EMSTATE_DEBUG_HYPER:
2816 {
2817 TMR3NotifySuspend(pVM, pVCpu);
2818 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2819
2820 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2821 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2822 if (rc != VINF_SUCCESS)
2823 {
2824 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2825 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2826 else
2827 {
2828 /* switch to guru meditation mode */
2829 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2830 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2831 VMMR3FatalDump(pVM, pVCpu, rc);
2832 }
2833 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2834 return rc;
2835 }
2836
2837 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2838 TMR3NotifyResume(pVM, pVCpu);
2839 break;
2840 }
2841
2842 /*
2843 * Guru meditation takes place in the debugger.
2844 */
2845 case EMSTATE_GURU_MEDITATION:
2846 {
2847 TMR3NotifySuspend(pVM, pVCpu);
2848 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2849 VMMR3FatalDump(pVM, pVCpu, rc);
2850 emR3Debug(pVM, pVCpu, rc);
2851 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2852 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2853 return rc;
2854 }
2855
2856 /*
2857 * The states we don't expect here.
2858 */
2859 case EMSTATE_NONE:
2860 case EMSTATE_TERMINATING:
2861 default:
2862 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2863 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2864 TMR3NotifySuspend(pVM, pVCpu);
2865 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2866 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2867 return VERR_EM_INTERNAL_ERROR;
2868 }
2869 } /* The Outer Main Loop */
2870 }
2871 else
2872 {
2873 /*
2874 * Fatal error.
2875 */
2876 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2877 TMR3NotifySuspend(pVM, pVCpu);
2878 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2879 VMMR3FatalDump(pVM, pVCpu, rc);
2880 emR3Debug(pVM, pVCpu, rc);
2881 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2882 /** @todo change the VM state! */
2883 return rc;
2884 }
2885
2886 /* not reached */
2887}
2888
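/* A minimal sketch of how the per-VCPU emulation thread (see VMEmt.cpp) is expected
   to drive EMR3ExecuteVM() once the VM has been powered up; the surrounding thread
   loop is assumed, not shown in this file: */
#if 0 /* illustrative only */
    int rc = EMR3ExecuteVM(pVM, pVCpu);     /* returns on power off, suspend or fatal error */
    Log(("EMT%u: EMR3ExecuteVM returned %Rrc, VM state %s\n",
         pVCpu->idCpu, rc, VMR3GetStateName(VMR3GetState(pVM))));
#endif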