VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@ 92473

Last change on this file since 92473 was 92119, checked in by vboxsync, 3 years ago

VMM/EM: Stop StatREMTotal when need for rescheduling is detected and we return directly from the loop. Log call to TRPMR3InjectEvent. bugref:10122

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 115.9 KB
1/* $Id: EM.cpp 92119 2021-10-28 00:29:09Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/vmm.h>
42#include <VBox/vmm/selm.h>
43#include <VBox/vmm/trpm.h>
44#include <VBox/vmm/iem.h>
45#include <VBox/vmm/nem.h>
46#include <VBox/vmm/iom.h>
47#include <VBox/vmm/dbgf.h>
48#include <VBox/vmm/pgm.h>
49#include <VBox/vmm/apic.h>
50#include <VBox/vmm/tm.h>
51#include <VBox/vmm/mm.h>
52#include <VBox/vmm/ssm.h>
53#include <VBox/vmm/pdmapi.h>
54#include <VBox/vmm/pdmcritsect.h>
55#include <VBox/vmm/pdmqueue.h>
56#include <VBox/vmm/hm.h>
57#include "EMInternal.h"
58#include <VBox/vmm/vm.h>
59#include <VBox/vmm/uvm.h>
60#include <VBox/vmm/cpumdis.h>
61#include <VBox/dis.h>
62#include <VBox/disopcode.h>
63#include <VBox/err.h>
64#include "VMMTracing.h"
65
66#include <iprt/asm.h>
67#include <iprt/string.h>
68#include <iprt/stream.h>
69#include <iprt/thread.h>
70
71
72/*********************************************************************************************************************************
73* Internal Functions *
74*********************************************************************************************************************************/
75static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
76static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
77#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
78static const char *emR3GetStateName(EMSTATE enmState);
79#endif
80static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
81#if defined(VBOX_WITH_REM) || defined(DEBUG)
82static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
83#endif
84static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
85
86
87/**
88 * Initializes the EM.
89 *
90 * @returns VBox status code.
91 * @param pVM The cross context VM structure.
92 */
93VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
94{
95 LogFlow(("EMR3Init\n"));
96 /*
97 * Assert alignment and sizes.
98 */
99 AssertCompileMemberAlignment(VM, em.s, 32);
100 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
101 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
102 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
103
104 /*
105 * Init the structure.
106 */
107 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
108 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
109
110 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
111 AssertLogRelRCReturn(rc, rc);
112
113 bool fEnabled;
114 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
115 AssertLogRelRCReturn(rc, rc);
116 pVM->em.s.fGuruOnTripleFault = !fEnabled;
117 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
118 {
119 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
120 pVM->em.s.fGuruOnTripleFault = true;
121 }
122
123 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
124
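/* Note (illustrative, not part of the original file): /EM/IemExecutesAll and
   /EM/TripleFaultReset above are plain CFGM keys, so they must be present in the
   configuration tree before EMR3Init() runs.  A rough sketch of the write side, e.g.
   from the VM construction code (the variable name here is an example):

       PCFGMNODE pCfgEMWr;
       CFGMR3InsertNode(CFGMR3GetRoot(pVM), "EM", &pCfgEMWr);
       CFGMR3InsertInteger(pCfgEMWr, "IemExecutesAll", 1);   - booleans are stored as integers.

   From the host side the same keys are typically reachable as extradata of the form
   VBoxInternal/EM/IemExecutesAll (assumption). */
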
125 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
126 * Whether to try correlate exit history in any context, detect hot spots and
127 * try optimize these using IEM if there are other exits close by. This
128 * overrides the context specific settings. */
129 bool fExitOptimizationEnabled = true;
130 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
131 AssertLogRelRCReturn(rc, rc);
132
133 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
134 * Whether to optimize exits in ring-0. Setting this to false will also disable
135 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
136 * capabilities of the host kernel, this optimization may be unavailable. */
137 bool fExitOptimizationEnabledR0 = true;
138 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
139 AssertLogRelRCReturn(rc, rc);
140 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
141
142 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
143 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
144 * hooks are in effect). */
145 /** @todo change the default to true here */
146 bool fExitOptimizationEnabledR0PreemptDisabled = true;
147 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
148 AssertLogRelRCReturn(rc, rc);
149 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
150
151 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
152 * Maximum number of instructions to let EMHistoryExec execute in one go. */
153 uint16_t cHistoryExecMaxInstructions = 8192;
154 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
155 AssertLogRelRCReturn(rc, rc);
156 if (cHistoryExecMaxInstructions < 16)
157 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
158
159 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
160 * Maximum number of instructions between exits during probing. */
161 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
162#ifdef RT_OS_WINDOWS
163 if (VM_IS_NEM_ENABLED(pVM))
164 cHistoryProbeMaxInstructionsWithoutExit = 32;
165#endif
166 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
167 cHistoryProbeMaxInstructionsWithoutExit);
168 AssertLogRelRCReturn(rc, rc);
169 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
170 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
171 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 16");
172
173 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
174 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
175 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
176 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
177 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
178 cHistoryProbeMinInstructions);
179 AssertLogRelRCReturn(rc, rc);
180
181 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
182 {
183 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
184 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
185 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
186 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
187 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
188 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
189 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
190 }
191
192 /*
193 * Saved state.
194 */
195 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
196 NULL, NULL, NULL,
197 NULL, emR3Save, NULL,
198 NULL, emR3Load, NULL);
199 if (RT_FAILURE(rc))
200 return rc;
201
202 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
203 {
204 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
205
206 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
207 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
208 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
209 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
210
211# define EM_REG_COUNTER(a, b, c) \
212 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
213 AssertRC(rc);
214
215# define EM_REG_COUNTER_USED(a, b, c) \
216 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
217 AssertRC(rc);
218
219# define EM_REG_PROFILE(a, b, c) \
220 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
221 AssertRC(rc);
222
223# define EM_REG_PROFILE_ADV(a, b, c) \
224 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
225 AssertRC(rc);
226
227 /*
228 * Statistics.
229 */
230#ifdef VBOX_WITH_STATISTICS
231 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
232 EM_REG_COUNTER_USED(&pVCpu->em.s.StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions end to IEM in ring-3.");
233
234 /* these should be considered for release statistics. */
235 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
236 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
237 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
238 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
239 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times enmR3HMExecute is called.");
240 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
241 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
242 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
243#endif /* VBOX_WITH_STATISTICS */
244 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
245 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times enmR3NEMExecute is called.");
246#ifdef VBOX_WITH_STATISTICS
247 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%u/EM/REMEmuSingle", "Profiling single instruction REM execution.");
248 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
249 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%u/EM/REMSync", "Profiling REM context syncing.");
250 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%u/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
251 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%u/EM/RAWExec", "Profiling Raw Mode execution.");
252 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%u/EM/RAWTail", "Profiling Raw Mode tail overhead.");
253#endif /* VBOX_WITH_STATISTICS */
254
255 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
256 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
257 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
258 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
259 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%u/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
260
261 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
262
263 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
264 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
265 AssertRC(rc);
266
267 /* History record statistics */
268 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
269 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
270 AssertRC(rc);
271
272 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
273 {
274 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
275 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
276 AssertRC(rc);
277 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
278 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
279 AssertRC(rc);
280 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
281 "Number of replacments at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", idCpu, iStep);
282 AssertRC(rc);
283 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
284 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
285 AssertRC(rc);
286 }
287
288 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
289 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
290 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
291 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
292 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
293 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
294 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
295 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
296 }
297
298 emR3InitDbg(pVM);
299 return VINF_SUCCESS;
300}
301
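/* Illustrative sketch (not part of the original file): the counters and profiles
   registered in EMR3Init() above all live in the statistics manager (STAM), so they can
   be dumped at runtime roughly like this.  The helper name and patterns are examples. */
static void emR3ExampleDumpStats(PUVM pUVM)
{
    STAMR3Dump(pUVM, "/EM/CPU0/ExitHashing/*");     /* exit-history hash table counters */
    STAMR3Dump(pUVM, "/PROF/CPU0/EM/*");            /* EM profiling data for VCPU 0 */
}
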
302
303/**
304 * Called when a VM initialization stage is completed.
305 *
306 * @returns VBox status code.
307 * @param pVM The cross context VM structure.
308 * @param enmWhat The initialization state that was completed.
309 */
310VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
311{
312 if (enmWhat == VMINITCOMPLETED_RING0)
313 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
314 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
315 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
316 return VINF_SUCCESS;
317}
318
319
320/**
321 * Applies relocations to data and code managed by this
322 * component. This function will be called at init and
323 * whenever the VMM needs to relocate itself inside the GC.
324 *
325 * @param pVM The cross context VM structure.
326 */
327VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
328{
329 LogFlow(("EMR3Relocate\n"));
330 RT_NOREF(pVM);
331}
332
333
334/**
335 * Reset the EM state for a CPU.
336 *
337 * Called by EMR3Reset and hot plugging.
338 *
339 * @param pVCpu The cross context virtual CPU structure.
340 */
341VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
342{
343 /* Reset scheduling state. */
344 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
345
346 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
347 out of the HALTED state here so that enmPrevState doesn't end up as
348 HALTED when EMR3Execute returns. */
349 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
350 {
351 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
352 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
353 }
354}
355
356
357/**
358 * Reset notification.
359 *
360 * @param pVM The cross context VM structure.
361 */
362VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
363{
364 Log(("EMR3Reset: \n"));
365 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
366 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
367}
368
369
370/**
371 * Terminates the EM.
372 *
373 * Termination means cleaning up and freeing all resources,
374 * the VM itself is at this point powered off or suspended.
375 *
376 * @returns VBox status code.
377 * @param pVM The cross context VM structure.
378 */
379VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
380{
381 RT_NOREF(pVM);
382 return VINF_SUCCESS;
383}
384
385
386/**
387 * Execute state save operation.
388 *
389 * @returns VBox status code.
390 * @param pVM The cross context VM structure.
391 * @param pSSM SSM operation handle.
392 */
393static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
394{
395 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
396 {
397 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
398
399 SSMR3PutBool(pSSM, false /*fForceRAW*/);
400
401 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
402 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
403 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
404
405 /* Save mwait state. */
406 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
407 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
408 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
409 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
410 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
411 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
412 AssertRCReturn(rc, rc);
413 }
414 return VINF_SUCCESS;
415}
416
417
418/**
419 * Execute state load operation.
420 *
421 * @returns VBox status code.
422 * @param pVM The cross context VM structure.
423 * @param pSSM SSM operation handle.
424 * @param uVersion Data layout version.
425 * @param uPass The data pass.
426 */
427static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
428{
429 /*
430 * Validate version.
431 */
432 if ( uVersion > EM_SAVED_STATE_VERSION
433 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
434 {
435 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
436 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
437 }
438 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
439
440 /*
441 * Load the saved state.
442 */
443 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
444 {
445 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
446
447 bool fForceRAWIgnored;
448 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
449 AssertRCReturn(rc, rc);
450
451 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
452 {
453 SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
454 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
455
456 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
457 }
458 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
459 {
460 /* Load mwait state. */
461 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
462 AssertRCReturn(rc, rc);
463 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
464 AssertRCReturn(rc, rc);
465 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
466 AssertRCReturn(rc, rc);
467 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
468 AssertRCReturn(rc, rc);
469 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
470 AssertRCReturn(rc, rc);
471 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
472 AssertRCReturn(rc, rc);
473 }
474 }
475 return VINF_SUCCESS;
476}
477
478
479/**
480 * Argument packet for emR3SetExecutionPolicy.
481 */
482struct EMR3SETEXECPOLICYARGS
483{
484 EMEXECPOLICY enmPolicy;
485 bool fEnforce;
486};
487
488
489/**
490 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
491 */
492static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
493{
494 /*
495 * Only the first CPU changes the variables.
496 */
497 if (pVCpu->idCpu == 0)
498 {
499 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
500 switch (pArgs->enmPolicy)
501 {
502 case EMEXECPOLICY_RECOMPILE_RING0:
503 case EMEXECPOLICY_RECOMPILE_RING3:
504 break;
505 case EMEXECPOLICY_IEM_ALL:
506 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
507 break;
508 default:
509 AssertFailedReturn(VERR_INVALID_PARAMETER);
510 }
511 Log(("EM: Set execution policy (fIemExecutesAll=%RTbool)\n", pVM->em.s.fIemExecutesAll));
512 }
513
514 /*
515 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
516 */
517 return pVCpu->em.s.enmState == EMSTATE_RAW
518 || pVCpu->em.s.enmState == EMSTATE_HM
519 || pVCpu->em.s.enmState == EMSTATE_NEM
520 || pVCpu->em.s.enmState == EMSTATE_IEM
521 || pVCpu->em.s.enmState == EMSTATE_REM
522 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
523 ? VINF_EM_RESCHEDULE
524 : VINF_SUCCESS;
525}
526
527
528/**
529 * Changes an execution scheduling policy parameter.
530 *
531 * This is used to enable or disable raw-mode / hardware-virtualization
532 * execution of user and supervisor code.
533 *
534 * @returns VINF_SUCCESS on success.
535 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
536 * @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
537 *
538 * @param pUVM The user mode VM handle.
539 * @param enmPolicy The scheduling policy to change.
540 * @param fEnforce Whether to enforce the policy or not.
541 */
542VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
543{
544 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
545 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
546 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
547
548 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
549 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
550}
551
552
553/**
554 * Queries an execution scheduling policy parameter.
555 *
556 * @returns VBox status code
557 * @param pUVM The user mode VM handle.
558 * @param enmPolicy The scheduling policy to query.
559 * @param pfEnforced Where to return the current value.
560 */
561VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
562{
563 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
564 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
565 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
566 PVM pVM = pUVM->pVM;
567 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
568
569 /* No need to bother EMTs with a query. */
570 switch (enmPolicy)
571 {
572 case EMEXECPOLICY_RECOMPILE_RING0:
573 case EMEXECPOLICY_RECOMPILE_RING3:
574 *pfEnforced = false;
575 break;
576 case EMEXECPOLICY_IEM_ALL:
577 *pfEnforced = pVM->em.s.fIemExecutesAll;
578 break;
579 default:
580 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
581 }
582
583 return VINF_SUCCESS;
584}
585
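/* Illustrative sketch (not part of the original file): a minimal caller-side use of the
   two policy entry points above.  The wrapper name is an example and error handling is
   trimmed; note that EMR3SetExecutionPolicy may also return VINF_EM_RESCHEDULE. */
static void emR3ExamplePolicyUsage(PUVM pUVM)
{
    int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true /*fEnforce*/);
    AssertLogRelRC(rc);

    bool fIemAll = false;
    rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fIemAll);
    AssertLogRelRC(rc);
    LogRel(("EM: IEM_ALL policy is now %RTbool\n", fIemAll));
}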
586
587/**
588 * Queries the main execution engine of the VM.
589 *
590 * @returns VBox status code
591 * @param pUVM The user mode VM handle.
592 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
593 */
594VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
595{
596 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
597 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
598
599 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
600 PVM pVM = pUVM->pVM;
601 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
602
603 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
604 return VINF_SUCCESS;
605}
606
607
608/**
609 * Raise a fatal error.
610 *
611 * Safely terminate the VM with full state report and stuff. This function
612 * will naturally never return.
613 *
614 * @param pVCpu The cross context virtual CPU structure.
615 * @param rc VBox status code.
616 */
617VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
618{
619 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
620 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
621}
622
623
624#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
625/**
626 * Gets the EM state name.
627 *
628 * @returns Pointer to read-only state name.
629 * @param enmState The state.
630 */
631static const char *emR3GetStateName(EMSTATE enmState)
632{
633 switch (enmState)
634 {
635 case EMSTATE_NONE: return "EMSTATE_NONE";
636 case EMSTATE_RAW: return "EMSTATE_RAW";
637 case EMSTATE_HM: return "EMSTATE_HM";
638 case EMSTATE_IEM: return "EMSTATE_IEM";
639 case EMSTATE_REM: return "EMSTATE_REM";
640 case EMSTATE_HALTED: return "EMSTATE_HALTED";
641 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
642 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
643 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
644 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
645 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
646 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
647 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
648 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
649 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
650 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
651 case EMSTATE_NEM: return "EMSTATE_NEM";
652 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
653 default: return "Unknown!";
654 }
655}
656#endif /* LOG_ENABLED || VBOX_STRICT */
657
658
659/**
660 * Handle pending ring-3 I/O port write.
661 *
662 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
663 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
664 *
665 * @returns Strict VBox status code.
666 * @param pVM The cross context VM structure.
667 * @param pVCpu The cross context virtual CPU structure.
668 */
669VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
670{
671 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
672
673 /* Get and clear the pending data. */
674 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
675 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
676 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
677 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
678 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
679
680 /* Assert sanity. */
681 switch (cbValue)
682 {
683 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
684 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
685 case 4: break;
686 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
687 }
688 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
689
690 /* Do the work.*/
691 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
692 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
693 if (IOM_SUCCESS(rcStrict))
694 {
695 pVCpu->cpum.GstCtx.rip += cbInstr;
696 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
697 }
698 return rcStrict;
699}
700
701
702/**
703 * Handle pending ring-3 I/O port read.
704 *
705 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
706 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
707 *
708 * @returns Strict VBox status code.
709 * @param pVM The cross context VM structure.
710 * @param pVCpu The cross context virtual CPU structure.
711 */
712VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
713{
714 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
715
716 /* Get and clear the pending data. */
717 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
718 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
719 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
720 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
721
722 /* Assert sanity. */
723 switch (cbValue)
724 {
725 case 1: break;
726 case 2: break;
727 case 4: break;
728 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
729 }
730 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
731 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
732
733 /* Do the work.*/
734 uint32_t uValue = 0;
735 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
736 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
737 if (IOM_SUCCESS(rcStrict))
738 {
739 if (cbValue == 4)
740 pVCpu->cpum.GstCtx.rax = uValue;
741 else if (cbValue == 2)
742 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
743 else
744 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
745 pVCpu->cpum.GstCtx.rip += cbInstr;
746 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
747 }
748 return rcStrict;
749}
750
751
752/**
753 * @callback_method_impl{FNVMMEMTRENDEZVOUS,
754 * Worker for emR3ExecuteSplitLockInstruction}
755 */
756static DECLCALLBACK(VBOXSTRICTRC) emR3ExecuteSplitLockInstructionRendezvous(PVM pVM, PVMCPU pVCpu, void *pvUser)
757{
758 /* Only execute on the specified EMT. */
759 if (pVCpu == (PVMCPU)pvUser)
760 {
761 LogFunc(("\n"));
762 VBOXSTRICTRC rcStrict = IEMExecOneIgnoreLock(pVCpu);
763 LogFunc(("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
764 if (rcStrict == VINF_IEM_RAISED_XCPT)
765 rcStrict = VINF_SUCCESS;
766 return rcStrict;
767 }
768 RT_NOREF(pVM);
769 return VINF_SUCCESS;
770}
771
772
773/**
774 * Handle an instruction causing a split cacheline lock access in SMP VMs.
775 *
776 * Generally we only get here if the host has split-lock detection enabled and
777 * this caused an \#AC because of something the guest did. If we interpret the
778 * instruction as-is, we'll likely just repeat the split-lock access and
779 * possibly be killed, get a SIGBUS, or trigger a warning followed by extra MSR
780 * changes on context switching (costs a tiny bit). Assuming these \#ACs are
781 * rare to non-existing, we'll do a rendezvous of all EMTs and tell IEM to
782 * disregard the lock prefix when emulating the instruction.
783 *
784 * Yes, we could probably modify the MSR (or MSRs) controlling the detection
785 * feature when entering guest context, but the support for the feature isn't a
786 * 100% given and we'll need the debug-only supdrvOSMsrProberRead and
787 * supdrvOSMsrProberWrite functionality from SUPDrv.cpp to safely detect it.
788 * Thus the approach is to just deal with the spurious \#ACs first and maybe add
789 * proper detection to SUPDrv later if we find it necessary.
790 *
791 * @see @bugref{10052}
792 *
793 * @returns Strict VBox status code.
794 * @param pVM The cross context VM structure.
795 * @param pVCpu The cross context virtual CPU structure.
796 */
797VBOXSTRICTRC emR3ExecuteSplitLockInstruction(PVM pVM, PVMCPU pVCpu)
798{
799 LogFunc(("\n"));
800 return VMMR3EmtRendezvous(pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_ALL_AT_ONCE, emR3ExecuteSplitLockInstructionRendezvous, pVCpu);
801}
802
803
804/**
805 * Debug loop.
806 *
807 * @returns VBox status code for EM.
808 * @param pVM The cross context VM structure.
809 * @param pVCpu The cross context virtual CPU structure.
810 * @param rc Current EM VBox status code.
811 */
812static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
813{
814 for (;;)
815 {
816 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
817 const VBOXSTRICTRC rcLast = rc;
818
819 /*
820 * Debug related RC.
821 */
822 switch (VBOXSTRICTRC_VAL(rc))
823 {
824 /*
825 * Single step an instruction.
826 */
827 case VINF_EM_DBG_STEP:
828 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
829 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
830 AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
831 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
832 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
833 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
834 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
835#ifdef VBOX_WITH_REM /** @todo fix me? */
836 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
837 rc = emR3RemStep(pVM, pVCpu);
838#endif
839 else
840 {
841 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
842 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
843 rc = VINF_EM_DBG_STEPPED;
844 }
845 break;
846
847 /*
848 * Simple events: stepped, breakpoint, stop/assertion.
849 */
850 case VINF_EM_DBG_STEPPED:
851 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
852 break;
853
854 case VINF_EM_DBG_BREAKPOINT:
855 rc = DBGFR3BpHit(pVM, pVCpu);
856 break;
857
858 case VINF_EM_DBG_STOP:
859 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
860 break;
861
862 case VINF_EM_DBG_EVENT:
863 rc = DBGFR3EventHandlePending(pVM, pVCpu);
864 break;
865
866 case VINF_EM_DBG_HYPER_STEPPED:
867 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
868 break;
869
870 case VINF_EM_DBG_HYPER_BREAKPOINT:
871 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
872 break;
873
874 case VINF_EM_DBG_HYPER_ASSERTION:
875 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
876 RTLogFlush(NULL);
877 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
878 break;
879
880 /*
881 * Guru meditation.
882 */
883 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
884 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
885 break;
886 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
887 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
888 break;
889 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
890 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
891 break;
892
893 default: /** @todo don't use default for guru, but make special errors code! */
894 {
895 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
896 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
897 break;
898 }
899 }
900
901 /*
902 * Process the result.
903 */
904 switch (VBOXSTRICTRC_VAL(rc))
905 {
906 /*
907 * Continue the debugging loop.
908 */
909 case VINF_EM_DBG_STEP:
910 case VINF_EM_DBG_STOP:
911 case VINF_EM_DBG_EVENT:
912 case VINF_EM_DBG_STEPPED:
913 case VINF_EM_DBG_BREAKPOINT:
914 case VINF_EM_DBG_HYPER_STEPPED:
915 case VINF_EM_DBG_HYPER_BREAKPOINT:
916 case VINF_EM_DBG_HYPER_ASSERTION:
917 break;
918
919 /*
920 * Resuming execution (in some form) has to be done here if we got
921 * a hypervisor debug event.
922 */
923 case VINF_SUCCESS:
924 case VINF_EM_RESUME:
925 case VINF_EM_SUSPEND:
926 case VINF_EM_RESCHEDULE:
927 case VINF_EM_RESCHEDULE_RAW:
928 case VINF_EM_RESCHEDULE_REM:
929 case VINF_EM_HALT:
930 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
931 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
932 if (rc == VINF_SUCCESS)
933 rc = VINF_EM_RESCHEDULE;
934 return rc;
935
936 /*
937 * The debugger isn't attached.
938 * We'll simply turn the thing off since that's the easiest thing to do.
939 */
940 case VERR_DBGF_NOT_ATTACHED:
941 switch (VBOXSTRICTRC_VAL(rcLast))
942 {
943 case VINF_EM_DBG_HYPER_STEPPED:
944 case VINF_EM_DBG_HYPER_BREAKPOINT:
945 case VINF_EM_DBG_HYPER_ASSERTION:
946 case VERR_TRPM_PANIC:
947 case VERR_TRPM_DONT_PANIC:
948 case VERR_VMM_RING0_ASSERTION:
949 case VERR_VMM_HYPER_CR3_MISMATCH:
950 case VERR_VMM_RING3_CALL_DISABLED:
951 return rcLast;
952 }
953 return VINF_EM_OFF;
954
955 /*
956 * Status codes terminating the VM in one or another sense.
957 */
958 case VINF_EM_TERMINATE:
959 case VINF_EM_OFF:
960 case VINF_EM_RESET:
961 case VINF_EM_NO_MEMORY:
962 case VINF_EM_RAW_STALE_SELECTOR:
963 case VINF_EM_RAW_IRET_TRAP:
964 case VERR_TRPM_PANIC:
965 case VERR_TRPM_DONT_PANIC:
966 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
967 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
968 case VERR_VMM_RING0_ASSERTION:
969 case VERR_VMM_HYPER_CR3_MISMATCH:
970 case VERR_VMM_RING3_CALL_DISABLED:
971 case VERR_INTERNAL_ERROR:
972 case VERR_INTERNAL_ERROR_2:
973 case VERR_INTERNAL_ERROR_3:
974 case VERR_INTERNAL_ERROR_4:
975 case VERR_INTERNAL_ERROR_5:
976 case VERR_IPE_UNEXPECTED_STATUS:
977 case VERR_IPE_UNEXPECTED_INFO_STATUS:
978 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
979 return rc;
980
981 /*
982 * The rest is unexpected, and will keep us here.
983 */
984 default:
985 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
986 break;
987 }
988 } /* debug for ever */
989}
990
991
992#if defined(VBOX_WITH_REM) || defined(DEBUG)
993/**
994 * Steps recompiled code.
995 *
996 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
997 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
998 *
999 * @param pVM The cross context VM structure.
1000 * @param pVCpu The cross context virtual CPU structure.
1001 */
1002static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1003{
1004 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1005
1006 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1007
1008 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1009 return rc;
1010}
1011#endif /* VBOX_WITH_REM || DEBUG */
1012
1013
1014/**
1015 * Executes recompiled code.
1016 *
1017 * This function contains the recompiler version of the inner
1018 * execution loop (the outer loop being in EMR3ExecuteVM()).
1019 *
1020 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1021 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1022 *
1023 * @param pVM The cross context VM structure.
1024 * @param pVCpu The cross context virtual CPU structure.
1025 * @param pfFFDone Where to store an indicator telling whether or not
1026 * FFs were done before returning.
1027 *
1028 */
1029static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1030{
1031#ifdef LOG_ENABLED
1032 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1033
1034 if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1035 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1036 else
1037 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1038#endif
1039 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1040
1041#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1042 AssertMsg( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1043 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1044 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1045#endif
1046
1047 /*
1048 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1049 * or the REM suggests raw-mode execution.
1050 */
1051 *pfFFDone = false;
1052 uint32_t cLoops = 0;
1053 int rc = VINF_SUCCESS;
1054 for (;;)
1055 {
1056 /*
1057 * Execute REM.
1058 */
1059 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1060 {
1061 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1062 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/));
1063 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1064 }
1065 else
1066 {
1067 /* Give up this time slice; virtual time continues */
1068 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1069 RTThreadSleep(5);
1070 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1071 rc = VINF_SUCCESS;
1072 }
1073
1074 /*
1075 * Deal with high priority post execution FFs before doing anything
1076 * else. Sync back the state and leave the lock to be on the safe side.
1077 */
1078 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1079 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1080 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1081
1082 /*
1083 * Process the returned status code.
1084 */
1085 if (rc != VINF_SUCCESS)
1086 {
1087 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1088 break;
1089 if (rc != VINF_REM_INTERRUPED_FF)
1090 {
1091 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1092 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1093 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1094 {
1095 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1096 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1097 {
1098 rc = VINF_EM_RESCHEDULE;
1099 break;
1100 }
1101 }
1102
1103 /*
1104 * Anything which is not known to us means an internal error
1105 * and the termination of the VM!
1106 */
1107 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1108 break;
1109 }
1110 }
1111
1112
1113 /*
1114 * Check and execute forced actions.
1115 *
1116 * Sync back the VM state and leave the lock before calling any of
1117 * these, you never know what's going to happen here.
1118 */
1119#ifdef VBOX_HIGH_RES_TIMERS_HACK
1120 TMTimerPollVoid(pVM, pVCpu);
1121#endif
1122 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1123 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1124 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1125 {
1126 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1127 rc = emR3ForcedActions(pVM, pVCpu, rc);
1128 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1129 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1130 if ( rc != VINF_SUCCESS
1131 && rc != VINF_EM_RESCHEDULE_REM)
1132 {
1133 *pfFFDone = true;
1134 break;
1135 }
1136 }
1137
1138 /*
1139 * Have to check if we can get back to fast execution mode every so often.
1140 */
1141 if (!(++cLoops & 7))
1142 {
1143 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
1144 if ( enmCheck != EMSTATE_REM
1145 && enmCheck != EMSTATE_IEM_THEN_REM)
1146 {
1147 LogFlow(("emR3RemExecute: emR3Reschedule -> %d -> VINF_EM_RESCHEDULE\n", enmCheck));
1148 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1149 return VINF_EM_RESCHEDULE;
1150 }
1151 Log2(("emR3RemExecute: emR3Reschedule -> %d\n", enmCheck));
1152 }
1153
1154 } /* The Inner Loop, recompiled execution mode version. */
1155
1156 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1157 return rc;
1158}
1159
1160
1161#ifdef DEBUG
1162
1163int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1164{
1165 EMSTATE enmOldState = pVCpu->em.s.enmState;
1166
1167 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1168
1169 Log(("Single step BEGIN:\n"));
1170 for (uint32_t i = 0; i < cIterations; i++)
1171 {
1172 DBGFR3PrgStep(pVCpu);
1173 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1174 emR3RemStep(pVM, pVCpu);
1175 if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
1176 break;
1177 }
1178 Log(("Single step END:\n"));
1179 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1180 pVCpu->em.s.enmState = enmOldState;
1181 return VINF_EM_RESCHEDULE;
1182}
1183
1184#endif /* DEBUG */
1185
1186
1187/**
1188 * Try execute the problematic code in IEM first, then fall back on REM if there
1189 * is too much of it or if IEM doesn't implement something.
1190 *
1191 * @returns Strict VBox status code from IEMExecLots.
1192 * @param pVM The cross context VM structure.
1193 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1194 * @param pfFFDone Force flags done indicator.
1195 *
1196 * @thread EMT(pVCpu)
1197 */
1198static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1199{
1200 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1201 *pfFFDone = false;
1202
1203 /*
1204 * Execute in IEM for a while.
1205 */
1206 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1207 {
1208 uint32_t cInstructions;
1209 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 1024 - pVCpu->em.s.cIemThenRemInstructions /*cMaxInstructions*/,
1210 UINT32_MAX/2 /*cPollRate*/, &cInstructions);
1211 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1212 if (rcStrict != VINF_SUCCESS)
1213 {
1214 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1215 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1216 break;
1217
1218 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1219 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1220 return rcStrict;
1221 }
1222
1223 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1224 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1225 {
1226 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1227 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1228 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1229 pVCpu->em.s.enmState = enmNewState;
1230 return VINF_SUCCESS;
1231 }
1232
1233 /*
1234 * Check for pending actions.
1235 */
1236 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1237 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1238 return VINF_SUCCESS;
1239 }
1240
1241 /*
1242 * Switch to REM.
1243 */
1244 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1245 pVCpu->em.s.enmState = EMSTATE_REM;
1246 return VINF_SUCCESS;
1247}
1248
1249
1250/**
1251 * Decides whether to execute RAW, HWACC or REM.
1252 *
1253 * @returns new EM state
1254 * @param pVM The cross context VM structure.
1255 * @param pVCpu The cross context virtual CPU structure.
1256 */
1257EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1258{
1259 /*
1260 * We stay in the wait for SIPI state unless explicitly told otherwise.
1261 */
1262 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1263 return EMSTATE_WAIT_SIPI;
1264
1265 /*
1266 * Execute everything in IEM?
1267 */
1268 if (pVM->em.s.fIemExecutesAll)
1269 return EMSTATE_IEM;
1270
1271 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1272 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1273 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1274
1275 X86EFLAGS EFlags = pVCpu->cpum.GstCtx.eflags;
1276 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1277 {
1278 if (VM_IS_HM_ENABLED(pVM))
1279 {
1280 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1281 return EMSTATE_HM;
1282 }
1283 else if (NEMR3CanExecuteGuest(pVM, pVCpu))
1284 return EMSTATE_NEM;
1285
1286 /*
1287 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1288 * turns off monitoring features essential for raw mode!
1289 */
1290 return EMSTATE_IEM_THEN_REM;
1291 }
1292
1293 /*
1294 * Standard raw-mode:
1295 *
1296 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1297 * or 32 bits protected mode ring 0 code
1298 *
1299 * The tests are ordered by the likelihood of being true during normal execution.
1300 */
1301 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1302 {
1303 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1304 return EMSTATE_REM;
1305 }
1306
1307# ifndef VBOX_RAW_V86
1308 if (EFlags.u32 & X86_EFL_VM) {
1309 Log2(("raw mode refused: VM_MASK\n"));
1310 return EMSTATE_REM;
1311 }
1312# endif
1313
1314 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1315 uint32_t u32CR0 = pVCpu->cpum.GstCtx.cr0;
1316 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1317 {
1318 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1319 return EMSTATE_REM;
1320 }
1321
1322 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
1323 {
1324 uint32_t u32Dummy, u32Features;
1325
1326 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1327 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1328 return EMSTATE_REM;
1329 }
1330
1331 unsigned uSS = pVCpu->cpum.GstCtx.ss.Sel;
1332 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
1333 || (uSS & X86_SEL_RPL) == 3)
1334 {
1335 if (!(EFlags.u32 & X86_EFL_IF))
1336 {
1337 Log2(("raw mode refused: IF (RawR3)\n"));
1338 return EMSTATE_REM;
1339 }
1340
1341 if (!(u32CR0 & X86_CR0_WP))
1342 {
1343 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1344 return EMSTATE_REM;
1345 }
1346 }
1347 else
1348 {
1349 /* Only ring 0 supervisor code. */
1350 if ((uSS & X86_SEL_RPL) != 0)
1351 {
1352 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1353 return EMSTATE_REM;
1354 }
1355
1356 // Let's start with pure 32 bits ring 0 code first
1357 /** @todo What's pure 32-bit mode? flat? */
1358 if ( !(pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1359 || !(pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
1360 {
1361 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1362 return EMSTATE_REM;
1363 }
1364
1365 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1366 if (!(u32CR0 & X86_CR0_WP))
1367 {
1368 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1369 return EMSTATE_REM;
1370 }
1371
1372# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1373 if (!(EFlags.u32 & X86_EFL_IF))
1374 {
1375 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1376 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1377 return EMSTATE_REM;
1378 }
1379# endif
1380
1381# ifndef VBOX_WITH_RAW_RING1
1382 /** @todo still necessary??? */
1383 if (EFlags.Bits.u2IOPL != 0)
1384 {
1385 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1386 return EMSTATE_REM;
1387 }
1388# endif
1389 }
1390
1391 /*
1392 * Stale hidden selectors means raw-mode is unsafe (being very careful).
1393 */
1394 if (pVCpu->cpum.GstCtx.cs.fFlags & CPUMSELREG_FLAGS_STALE)
1395 {
1396 Log2(("raw mode refused: stale CS\n"));
1397 return EMSTATE_REM;
1398 }
1399 if (pVCpu->cpum.GstCtx.ss.fFlags & CPUMSELREG_FLAGS_STALE)
1400 {
1401 Log2(("raw mode refused: stale SS\n"));
1402 return EMSTATE_REM;
1403 }
1404 if (pVCpu->cpum.GstCtx.ds.fFlags & CPUMSELREG_FLAGS_STALE)
1405 {
1406 Log2(("raw mode refused: stale DS\n"));
1407 return EMSTATE_REM;
1408 }
1409 if (pVCpu->cpum.GstCtx.es.fFlags & CPUMSELREG_FLAGS_STALE)
1410 {
1411 Log2(("raw mode refused: stale ES\n"));
1412 return EMSTATE_REM;
1413 }
1414 if (pVCpu->cpum.GstCtx.fs.fFlags & CPUMSELREG_FLAGS_STALE)
1415 {
1416 Log2(("raw mode refused: stale FS\n"));
1417 return EMSTATE_REM;
1418 }
1419 if (pVCpu->cpum.GstCtx.gs.fFlags & CPUMSELREG_FLAGS_STALE)
1420 {
1421 Log2(("raw mode refused: stale GS\n"));
1422 return EMSTATE_REM;
1423 }
1424
1425# ifdef VBOX_WITH_SAFE_STR
1426 if (pVCpu->cpum.GstCtx.tr.Sel == 0)
1427 {
1428 Log(("Raw mode refused -> TR=0\n"));
1429 return EMSTATE_REM;
1430 }
1431# endif
1432
1433 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1434 return EMSTATE_RAW;
1435}
1436
1437
1438/**
1439 * Executes all high priority post execution force actions.
1440 *
1441 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1442 * fatal error status code.
1443 *
1444 * @param pVM The cross context VM structure.
1445 * @param pVCpu The cross context virtual CPU structure.
1446 * @param rc The current strict VBox status code rc.
1447 */
1448VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1449{
1450 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1451
1452 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1453 PDMCritSectBothFF(pVM, pVCpu);
1454
1455 /* Update CR3 (Nested Paging case for HM). */
1456 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1457 {
1458 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1459 int const rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu), false /* fPdpesMapped */);
1460 if (RT_FAILURE(rc2))
1461 return rc2;
1462 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1463 }
1464
1465 /* IEM has pending work (typically memory write after INS instruction). */
1466 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1467 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1468
1469 /* IOM has pending work (committing an I/O or MMIO write). */
1470 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1471 {
1472 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1473 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1474 { /* half likely, or at least it's a line shorter. */ }
1475 else if (rc == VINF_SUCCESS)
1476 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1477 else
1478 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1479 }
1480
1481 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1482 {
1483 if ( rc > VINF_EM_NO_MEMORY
1484 && rc <= VINF_EM_LAST)
1485 rc = VINF_EM_NO_MEMORY;
1486 }
1487
1488 return rc;
1489}
1490
1491
1492/**
1493 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1494 *
1495 * @returns VBox status code.
1496 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1497 * @param pVCpu The cross context virtual CPU structure.
1498 */
1499static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1500{
1501#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1502 /* Handle the "external interrupt" VM-exit intercept. */
1503 if (CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
1504 {
1505 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1506 AssertMsg( rcStrict != VINF_PGM_CHANGE_MODE
1507 && rcStrict != VINF_VMX_VMEXIT
1508 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1509 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1510 return VBOXSTRICTRC_TODO(rcStrict);
1511 }
1512#else
1513 RT_NOREF(pVCpu);
1514#endif
1515 return VINF_NO_CHANGE;
1516}
1517
1518
1519/**
1520 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1521 *
1522 * @returns VBox status code.
1523 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1524 * @param pVCpu The cross context virtual CPU structure.
1525 */
1526static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1527{
1528#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1529 /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
1530 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1531 {
1532 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1533 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1534 if (RT_SUCCESS(rcStrict))
1535 {
1536 AssertMsg( rcStrict != VINF_PGM_CHANGE_MODE
1537 && rcStrict != VINF_SVM_VMEXIT
1538 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1539 return VBOXSTRICTRC_VAL(rcStrict);
1540 }
1541
1542 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1543 return VINF_EM_TRIPLE_FAULT;
1544 }
1545#else
1546 NOREF(pVCpu);
1547#endif
1548 return VINF_NO_CHANGE;
1549}
1550
1551
1552/**
1553 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1554 *
1555 * @returns VBox status code.
1556 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1557 * @param pVCpu The cross context virtual CPU structure.
1558 */
1559static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1560{
1561#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1562 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1563 {
1564 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1565 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1566 if (RT_SUCCESS(rcStrict))
1567 {
1568 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1569 Assert(rcStrict != VINF_SVM_VMEXIT);
1570 return VBOXSTRICTRC_VAL(rcStrict);
1571 }
1572 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1573 return VINF_EM_TRIPLE_FAULT;
1574 }
1575#else
1576 NOREF(pVCpu);
1577#endif
1578 return VINF_NO_CHANGE;
1579}
1580
1581
1582/**
1583 * Executes all pending forced actions.
1584 *
1585 * Forced actions can cause execution delays and execution
1586 * rescheduling. The former we deal with using action priority, so
1587 * that for instance pending timers aren't scheduled and run until
1588 * right before execution. The rescheduling we deal with using
1589 * return codes. The same goes for VM termination, only in that case
1590 * we exit everything.
1591 *
1592 * @returns VBox status code of equal or greater importance/severity than rc.
1593 * The most important ones are: VINF_EM_RESCHEDULE,
1594 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1595 *
1596 * @param pVM The cross context VM structure.
1597 * @param pVCpu The cross context virtual CPU structure.
1598 * @param rc The current rc.
1599 *
1600 */
1601int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1602{
1603 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1604#ifdef VBOX_STRICT
1605 int rcIrq = VINF_SUCCESS;
1606#endif
1607 int rc2;
1608#define UPDATE_RC() \
1609 do { \
1610 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1611 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1612 break; \
1613 if (!rc || rc2 < rc) \
1614 rc = rc2; \
1615 } while (0)
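    /* Worked example of the merge rule above (illustrative; assumes the usual
     * ordering where numerically smaller VINF_EM_* codes are more important):
     * starting from rc = VINF_SUCCESS, an rc2 of VINF_EM_RESCHEDULE is taken
     * over (rc was zero); a later rc2 of VINF_EM_SUSPEND then replaces it
     * because it is numerically smaller, i.e. more urgent. Error codes in rc
     * (rc < VINF_SUCCESS) are never overwritten. */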
1616 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1617
1618 /*
1619 * Post execution chunk first.
1620 */
1621 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1622 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1623 {
1624 /*
1625 * EMT Rendezvous (must be serviced before termination).
1626 */
1627 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1628 {
1629 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1630 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1631 UPDATE_RC();
1632 /** @todo HACK ALERT! The following test is to make sure EM+TM
1633 * thinks the VM is stopped/reset before the next VM state change
1634 * is made. We need a better solution for this, or at least make it
1635 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1636 * VINF_EM_SUSPEND). */
1637 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1638 {
1639 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1640 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1641 return rc;
1642 }
1643 }
1644
1645 /*
1646 * State change request (cleared by vmR3SetStateLocked).
1647 */
1648 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1649 {
1650 VMSTATE enmState = VMR3GetState(pVM);
1651 switch (enmState)
1652 {
1653 case VMSTATE_FATAL_ERROR:
1654 case VMSTATE_FATAL_ERROR_LS:
1655 case VMSTATE_GURU_MEDITATION:
1656 case VMSTATE_GURU_MEDITATION_LS:
1657 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1658 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1659 return VINF_EM_SUSPEND;
1660
1661 case VMSTATE_DESTROYING:
1662 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1663 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1664 return VINF_EM_TERMINATE;
1665
1666 default:
1667 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1668 }
1669 }
1670
1671 /*
1672 * Debugger Facility polling.
1673 */
1674 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1675 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1676 {
1677 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1678 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1679 UPDATE_RC();
1680 }
1681
1682 /*
1683 * Postponed reset request.
1684 */
1685 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1686 {
1687 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1688 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1689 UPDATE_RC();
1690 }
1691
1692 /*
1693 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1694 */
1695 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1696 {
1697 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1698 UPDATE_RC();
1699 if (rc == VINF_EM_NO_MEMORY)
1700 return rc;
1701 }
1702
1703 /* check that we got them all */
1704 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1705 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1706 }
1707
1708 /*
1709 * Normal priority then.
1710 * (Executed in no particular order.)
1711 */
1712 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1713 {
1714 /*
1715 * PDM Queues are pending.
1716 */
1717 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1718 PDMR3QueueFlushAll(pVM);
1719
1720 /*
1721 * PDM DMA transfers are pending.
1722 */
1723 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1724 PDMR3DmaRun(pVM);
1725
1726 /*
1727 * EMT Rendezvous (make sure they are handled before the requests).
1728 */
1729 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1730 {
1731 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1732 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1733 UPDATE_RC();
1734 /** @todo HACK ALERT! The following test is to make sure EM+TM
1735 * thinks the VM is stopped/reset before the next VM state change
1736 * is made. We need a better solution for this, or at least make it
1737 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1738 * VINF_EM_SUSPEND). */
1739 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1740 {
1741 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1742 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1743 return rc;
1744 }
1745 }
1746
1747 /*
1748 * Requests from other threads.
1749 */
1750 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1751 {
1752 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1753 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1754 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1755 {
1756 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1757 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1758 return rc2;
1759 }
1760 UPDATE_RC();
1761 /** @todo HACK ALERT! The following test is to make sure EM+TM
1762 * thinks the VM is stopped/reset before the next VM state change
1763 * is made. We need a better solution for this, or at least make it
1764 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1765 * VINF_EM_SUSPEND). */
1766 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1767 {
1768 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1769 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1770 return rc;
1771 }
1772 }
1773
1774 /* check that we got them all */
1775 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS));
1776 }
1777
1778 /*
1779 * Normal priority then. (per-VCPU)
1780 * (Executed in no particular order.)
1781 */
1782 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1783 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1784 {
1785 /*
1786 * Requests from other threads.
1787 */
1788 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1789 {
1790 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1791 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1792 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1793 {
1794 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1795 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1796 return rc2;
1797 }
1798 UPDATE_RC();
1799 /** @todo HACK ALERT! The following test is to make sure EM+TM
1800 * thinks the VM is stopped/reset before the next VM state change
1801 * is made. We need a better solution for this, or at least make it
1802 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1803 * VINF_EM_SUSPEND). */
1804 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1805 {
1806 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1807 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1808 return rc;
1809 }
1810 }
1811
1812 /* check that we got them all */
1813 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1814 }
1815
1816 /*
1817 * High priority pre execution chunk last.
1818 * (Executed in ascending priority order.)
1819 */
1820 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1821 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1822 {
1823 /*
1824 * Timers before interrupts.
1825 */
1826 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1827 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1828 TMR3TimerQueuesDo(pVM);
1829
1830 /*
1831 * Pick up asynchronously posted interrupts into the APIC.
1832 */
1833 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1834 APICUpdatePendingInterrupts(pVCpu);
1835
1836 /*
1837 * The instruction following an emulated STI should *always* be executed!
1838 *
1839 * Note! We intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if
1840 * the eip is the same as the inhibited instr address. Before we
1841 * are able to execute this instruction in raw mode (iret to
1842 * guest code) an external interrupt might force a world switch
1843 * again, possibly allowing a guest interrupt to be dispatched
1844 * in the process. This could break the guest. Sounds very
1845 * unlikely, but such timing-sensitive problems are not as rare as
1846 * you might think.
1847 */
1848 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1849 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1850 {
1851 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
1852 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1853 {
1854 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1855 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1856 }
1857 else
1858 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1859 }
1860
1861 /** @todo SMIs. If we implement SMIs, this is where they will have to be
1862 * delivered. */
1863
1864#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1865 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER))
1866 {
1867 /*
1868 * VMX Nested-guest APIC-write pending (can cause VM-exits).
1869 * Takes priority over even SMI and INIT signals.
1870 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
1871 */
1872 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
1873 {
1874 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
1875 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1876 UPDATE_RC();
1877 }
1878
1879 /*
1880 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
1881 * Takes priority over "Traps on the previous instruction".
1882 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
1883 */
1884 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
1885 {
1886 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
1887 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1888 UPDATE_RC();
1889 }
1890
1891 /*
1892 * VMX Nested-guest preemption timer VM-exit.
1893 * Takes priority over NMI-window VM-exits.
1894 */
1895 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
1896 {
1897 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
1898 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1899 UPDATE_RC();
1900 }
1901 }
1902#endif
1903
1904 /*
1905 * Guest event injection.
1906 */
1907 bool fWakeupPending = false;
1908 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1909 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1910 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) /* Interrupt shadows block both NMIs and interrupts. */
1911 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
1912 {
1913 bool fInVmxNonRootMode;
1914 bool fInSvmHwvirtMode;
1915 bool const fInNestedGuest = CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx);
1916 if (fInNestedGuest)
1917 {
1918 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1919 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1920 }
1921 else
1922 {
1923 fInVmxNonRootMode = false;
1924 fInSvmHwvirtMode = false;
1925 }
1926
1927 bool fGif = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
1928 if (fGif)
1929 {
1930#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1931 /*
1932 * VMX NMI-window VM-exit.
1933 * Takes priority over non-maskable interrupts (NMIs).
1934 * Interrupt shadows block NMI-window VM-exits.
1935 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
1936 *
1937 * See Intel spec. 25.2 "Other Causes Of VM Exits".
1938 * See Intel spec. 26.7.6 "NMI-Window Exiting".
1939 */
1940 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1941 && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1942 {
1943 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
1944 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1945 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
1946 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1947 && rc2 != VINF_PGM_CHANGE_MODE
1948 && rc2 != VINF_VMX_VMEXIT
1949 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1950 UPDATE_RC();
1951 }
1952 else
1953#endif
1954 /*
1955 * NMIs (take priority over external interrupts).
1956 */
1957 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1958 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1959 {
1960#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1961 if ( fInVmxNonRootMode
1962 && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
1963 {
1964 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
1965 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1966 UPDATE_RC();
1967 }
1968 else
1969#endif
1970#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1971 if ( fInSvmHwvirtMode
1972 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
1973 {
1974 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
1975 AssertMsg( rc2 != VINF_PGM_CHANGE_MODE
1976 && rc2 != VINF_SVM_VMEXIT
1977 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1978 UPDATE_RC();
1979 }
1980 else
1981#endif
1982 {
1983 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
1984 if (rc2 == VINF_SUCCESS)
1985 {
1986 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1987 fWakeupPending = true;
1988 if (pVM->em.s.fIemExecutesAll)
1989 rc2 = VINF_EM_RESCHEDULE;
1990 else
1991 {
1992 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
1993 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
1994 : VINF_EM_RESCHEDULE_REM;
1995 }
1996 }
1997 UPDATE_RC();
1998 }
1999 }
2000#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2001 /*
2002 * VMX Interrupt-window VM-exits.
2003 * Takes priority over external interrupts.
2004 */
2005 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
2006 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
2007 {
2008 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
2009 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
2010 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
2011 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
2012 && rc2 != VINF_PGM_CHANGE_MODE
2013 && rc2 != VINF_VMX_VMEXIT
2014 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
2015 UPDATE_RC();
2016 }
2017#endif
2018#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2019 /** @todo NSTSVM: Handle this for SVM here too later, not only when an interrupt
2020 * is actually pending like we currently do. */
2021#endif
2022 /*
2023 * External interrupts.
2024 */
2025 else
2026 {
2027 /*
2028 * VMX: virtual interrupts take priority over physical interrupts.
2029 * SVM: physical interrupts take priority over virtual interrupts.
2030 */
2031 if ( fInVmxNonRootMode
2032 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
2033 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
2034 {
2035 /** @todo NSTVMX: virtual-interrupt delivery. */
2036 rc2 = VINF_SUCCESS;
2037 }
2038 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2039 && CPUMIsGuestPhysIntrEnabled(pVCpu))
2040 {
2041 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2042 if (fInVmxNonRootMode)
2043 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
2044 else if (fInSvmHwvirtMode)
2045 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
2046 else
2047 rc2 = VINF_NO_CHANGE;
2048
2049 if (rc2 == VINF_NO_CHANGE)
2050 {
2051 bool fInjected = false;
2052 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2053 /** @todo this really isn't nice, should properly handle this */
2054 /* Note! This can still cause a VM-exit (on Intel). */
2055 LogFlow(("Calling TRPMR3InjectEvent: %04x:%08RX64 efl=%#x\n",
2056 pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.eflags));
2057 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
2058 fWakeupPending = true;
2059 if ( pVM->em.s.fIemExecutesAll
2060 && ( rc2 == VINF_EM_RESCHEDULE_REM
2061 || rc2 == VINF_EM_RESCHEDULE_HM
2062 || rc2 == VINF_EM_RESCHEDULE_RAW))
2063 {
2064 rc2 = VINF_EM_RESCHEDULE;
2065 }
2066#ifdef VBOX_STRICT
2067 if (fInjected)
2068 rcIrq = rc2;
2069#endif
2070 }
2071 UPDATE_RC();
2072 }
2073 else if ( fInSvmHwvirtMode
2074 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
2075 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
2076 {
2077 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
2078 if (rc2 == VINF_NO_CHANGE)
2079 {
2080 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
2081 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
2082 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
2083 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
2084 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
2085 rc2 = VINF_EM_RESCHEDULE;
2086#ifdef VBOX_STRICT
2087 rcIrq = rc2;
2088#endif
2089 }
2090 UPDATE_RC();
2091 }
2092 }
2093 }
2094 }
2095
2096 /*
2097 * Allocate handy pages.
2098 */
2099 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2100 {
2101 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2102 UPDATE_RC();
2103 }
2104
2105 /*
2106 * Debugger Facility request.
2107 */
2108 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
2109 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
2110 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
2111 {
2112 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2113 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2114 UPDATE_RC();
2115 }
2116
2117 /*
2118 * EMT Rendezvous (must be serviced before termination).
2119 */
2120 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2121 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2122 {
2123 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2124 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2125 UPDATE_RC();
2126 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2127 * stopped/reset before the next VM state change is made. We need a better
2128 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2129 * && rc <= VINF_EM_SUSPEND). */
2130 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2131 {
2132 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2133 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2134 return rc;
2135 }
2136 }
2137
2138 /*
2139 * State change request (cleared by vmR3SetStateLocked).
2140 */
2141 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2142 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2143 {
2144 VMSTATE enmState = VMR3GetState(pVM);
2145 switch (enmState)
2146 {
2147 case VMSTATE_FATAL_ERROR:
2148 case VMSTATE_FATAL_ERROR_LS:
2149 case VMSTATE_GURU_MEDITATION:
2150 case VMSTATE_GURU_MEDITATION_LS:
2151 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2152 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2153 return VINF_EM_SUSPEND;
2154
2155 case VMSTATE_DESTROYING:
2156 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2157 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2158 return VINF_EM_TERMINATE;
2159
2160 default:
2161 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2162 }
2163 }
2164
2165 /*
2166 * Out of memory? Since most of our fellow high priority actions may cause us
2167 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2168 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2169 * than us since we can terminate without allocating more memory.
2170 */
2171 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2172 {
2173 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2174 UPDATE_RC();
2175 if (rc == VINF_EM_NO_MEMORY)
2176 return rc;
2177 }
2178
2179 /*
2180 * If the virtual sync clock is still stopped, make TM restart it.
2181 */
2182 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2183 TMR3VirtualSyncFF(pVM, pVCpu);
2184
2185#ifdef DEBUG
2186 /*
2187 * Debug, pause the VM.
2188 */
2189 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2190 {
2191 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2192 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2193 return VINF_EM_SUSPEND;
2194 }
2195#endif
2196
2197 /* check that we got them all */
2198 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2199 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2200 }
2201
2202#undef UPDATE_RC
2203 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2204 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2205 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2206 return rc;
2207}
2208
2209
2210/**
2211 * Check if the preset execution time cap restricts guest execution scheduling.
2212 *
2213 * @returns true if execution is allowed, false otherwise
2214 * @param pVM The cross context VM structure.
2215 * @param pVCpu The cross context virtual CPU structure.
2216 */
2217bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2218{
2219 uint64_t u64UserTime, u64KernelTime;
2220
2221 if ( pVM->uCpuExecutionCap != 100
2222 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2223 {
2224 uint64_t u64TimeNow = RTTimeMilliTS();
2225 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2226 {
2227 /* New time slice. */
2228 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2229 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2230 pVCpu->em.s.u64TimeSliceExec = 0;
2231 }
2232 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2233
2234 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2235 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2236 return false;
2237 }
2238 return true;
2239}
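/* Worked example for the execution cap above (illustrative; assumes the usual
 * EM_TIME_SLICE of 100 ms): with uCpuExecutionCap = 50 the per-slice budget is
 * (EM_TIME_SLICE * 50) / 100 = 50 ms of combined kernel+user thread time. Once
 * u64TimeSliceExec reaches that budget the function returns false, and callers
 * throttle guest execution until the next slice begins. */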
2240
2241
2242/**
2243 * Execute VM.
2244 *
2245 * This function is the main loop of the VM. The emulation thread
2246 * calls this function when the VM has been successfully constructed
2247 * and we're ready to execute the VM.
2248 *
2249 * Returning from this function means that the VM is turned off or
2250 * suspended (state already saved) and deconstruction is next in line.
2251 *
2252 * All interaction from other threads is done using forced actions
2253 * and signalling of the wait object.
2254 *
2255 * @returns VBox status code; informational status codes may indicate failure.
2256 * @param pVM The cross context VM structure.
2257 * @param pVCpu The cross context virtual CPU structure.
2258 */
2259VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2260{
2261 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2262 pVM,
2263 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2264 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2265 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2266 VM_ASSERT_EMT(pVM);
2267 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2268 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2269 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2270 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2271
2272 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2273 if (rc == 0)
2274 {
2275 /*
2276 * Start the virtual time.
2277 */
2278 TMR3NotifyResume(pVM, pVCpu);
2279
2280 /*
2281 * The Outer Main Loop.
2282 */
2283 bool fFFDone = false;
2284
2285 /* Reschedule right away to start in the right state. */
2286 rc = VINF_SUCCESS;
2287
2288 /* If resuming after a pause or a state load, restore the previous
2289 state so we don't start executing code right away; otherwise just reschedule. */
2290 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2291 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2292 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2293 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2294 else
2295 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2296 pVCpu->em.s.cIemThenRemInstructions = 0;
2297 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2298
2299 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2300 for (;;)
2301 {
2302 /*
2303 * Before we can schedule anything (we're here because
2304 * scheduling is required) we must service any pending
2305 * forced actions to avoid any pending action causing
2306 * immediate rescheduling upon entering an inner loop
2307 *
2308 * Do forced actions.
2309 */
2310 if ( !fFFDone
2311 && RT_SUCCESS(rc)
2312 && rc != VINF_EM_TERMINATE
2313 && rc != VINF_EM_OFF
2314 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2315 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2316 {
2317 rc = emR3ForcedActions(pVM, pVCpu, rc);
2318 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2319 }
2320 else if (fFFDone)
2321 fFFDone = false;
2322
2323 /*
2324 * Now what to do?
2325 */
2326 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2327 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2328 switch (rc)
2329 {
2330 /*
2331 * Keep doing what we're currently doing.
2332 */
2333 case VINF_SUCCESS:
2334 break;
2335
2336 /*
2337 * Reschedule - to raw-mode execution.
2338 */
2339/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2340 case VINF_EM_RESCHEDULE_RAW:
2341 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2342 if (VM_IS_RAW_MODE_ENABLED(pVM))
2343 {
2344 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2345 pVCpu->em.s.enmState = EMSTATE_RAW;
2346 }
2347 else
2348 {
2349 AssertLogRelFailed();
2350 pVCpu->em.s.enmState = EMSTATE_NONE;
2351 }
2352 break;
2353
2354 /*
2355 * Reschedule - to HM or NEM.
2356 */
2357 case VINF_EM_RESCHEDULE_HM:
2358 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2359 if (VM_IS_HM_ENABLED(pVM))
2360 {
2361 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2362 pVCpu->em.s.enmState = EMSTATE_HM;
2363 }
2364 else if (VM_IS_NEM_ENABLED(pVM))
2365 {
2366 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2367 pVCpu->em.s.enmState = EMSTATE_NEM;
2368 }
2369 else
2370 {
2371 AssertLogRelFailed();
2372 pVCpu->em.s.enmState = EMSTATE_NONE;
2373 }
2374 break;
2375
2376 /*
2377 * Reschedule - to recompiled execution.
2378 */
2379 case VINF_EM_RESCHEDULE_REM:
2380 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2381 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2382 {
2383 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2384 enmOldState, EMSTATE_IEM_THEN_REM));
2385 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2386 {
2387 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2388 pVCpu->em.s.cIemThenRemInstructions = 0;
2389 }
2390 }
2391 else
2392 {
2393 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2394 pVCpu->em.s.enmState = EMSTATE_REM;
2395 }
2396 break;
2397
2398 /*
2399 * Resume.
2400 */
2401 case VINF_EM_RESUME:
2402 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2403 /* Don't reschedule in the halted or wait for SIPI case. */
2404 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2405 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2406 {
2407 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2408 break;
2409 }
2410 /* fall through and get scheduled. */
2411 RT_FALL_THRU();
2412
2413 /*
2414 * Reschedule.
2415 */
2416 case VINF_EM_RESCHEDULE:
2417 {
2418 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2419 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2420 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2421 pVCpu->em.s.cIemThenRemInstructions = 0;
2422 pVCpu->em.s.enmState = enmState;
2423 break;
2424 }
2425
2426 /*
2427 * Halted.
2428 */
2429 case VINF_EM_HALT:
2430 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2431 pVCpu->em.s.enmState = EMSTATE_HALTED;
2432 break;
2433
2434 /*
2435 * Switch to the wait for SIPI state (application processor only)
2436 */
2437 case VINF_EM_WAIT_SIPI:
2438 Assert(pVCpu->idCpu != 0);
2439 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2440 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2441 break;
2442
2443
2444 /*
2445 * Suspend.
2446 */
2447 case VINF_EM_SUSPEND:
2448 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2449 Assert(enmOldState != EMSTATE_SUSPENDED);
2450 pVCpu->em.s.enmPrevState = enmOldState;
2451 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2452 break;
2453
2454 /*
2455 * Reset.
2456 * We might end up doing a double reset for now, we'll have to clean up the mess later.
2457 */
2458 case VINF_EM_RESET:
2459 {
2460 if (pVCpu->idCpu == 0)
2461 {
2462 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2463 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2464 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2465 pVCpu->em.s.cIemThenRemInstructions = 0;
2466 pVCpu->em.s.enmState = enmState;
2467 }
2468 else
2469 {
2470 /* All other VCPUs go into the wait for SIPI state. */
2471 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2472 }
2473 break;
2474 }
2475
2476 /*
2477 * Power Off.
2478 */
2479 case VINF_EM_OFF:
2480 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2481 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2482 TMR3NotifySuspend(pVM, pVCpu);
2483 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2484 return rc;
2485
2486 /*
2487 * Terminate the VM.
2488 */
2489 case VINF_EM_TERMINATE:
2490 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2491 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2492 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2493 TMR3NotifySuspend(pVM, pVCpu);
2494 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2495 return rc;
2496
2497
2498 /*
2499 * Out of memory, suspend the VM and stuff.
2500 */
2501 case VINF_EM_NO_MEMORY:
2502 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2503 Assert(enmOldState != EMSTATE_SUSPENDED);
2504 pVCpu->em.s.enmPrevState = enmOldState;
2505 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2506 TMR3NotifySuspend(pVM, pVCpu);
2507 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2508
2509 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2510 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2511 if (rc != VINF_EM_SUSPEND)
2512 {
2513 if (RT_SUCCESS_NP(rc))
2514 {
2515 AssertLogRelMsgFailed(("%Rrc\n", rc));
2516 rc = VERR_EM_INTERNAL_ERROR;
2517 }
2518 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2519 }
2520 return rc;
2521
2522 /*
2523 * Guest debug events.
2524 */
2525 case VINF_EM_DBG_STEPPED:
2526 case VINF_EM_DBG_STOP:
2527 case VINF_EM_DBG_EVENT:
2528 case VINF_EM_DBG_BREAKPOINT:
2529 case VINF_EM_DBG_STEP:
2530 if (enmOldState == EMSTATE_RAW)
2531 {
2532 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2533 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2534 }
2535 else if (enmOldState == EMSTATE_HM)
2536 {
2537 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2538 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2539 }
2540 else if (enmOldState == EMSTATE_NEM)
2541 {
2542 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2543 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2544 }
2545 else if (enmOldState == EMSTATE_REM)
2546 {
2547 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2548 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2549 }
2550 else
2551 {
2552 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2553 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2554 }
2555 break;
2556
2557 /*
2558 * Hypervisor debug events.
2559 */
2560 case VINF_EM_DBG_HYPER_STEPPED:
2561 case VINF_EM_DBG_HYPER_BREAKPOINT:
2562 case VINF_EM_DBG_HYPER_ASSERTION:
2563 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2564 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2565 break;
2566
2567 /*
2568 * Triple fault.
2569 */
2570 case VINF_EM_TRIPLE_FAULT:
2571 if (!pVM->em.s.fGuruOnTripleFault)
2572 {
2573 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2574 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2575 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2576 continue;
2577 }
2578 /* Else fall through and trigger a guru. */
2579 RT_FALL_THRU();
2580
2581 case VERR_VMM_RING0_ASSERTION:
2582 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2583 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2584 break;
2585
2586 /*
2587 * Any error code showing up here other than the ones we
2588 * know and process above are considered to be FATAL.
2589 *
2590 * Unknown warnings and informational status codes are also
2591 * included in this.
2592 */
2593 default:
2594 if (RT_SUCCESS_NP(rc))
2595 {
2596 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2597 rc = VERR_EM_INTERNAL_ERROR;
2598 }
2599 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2600 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2601 break;
2602 }
2603
2604 /*
2605 * Act on state transition.
2606 */
2607 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2608 if (enmOldState != enmNewState)
2609 {
2610 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2611
2612 /* Clear MWait flags and the unhalt FF. */
2613 if ( enmOldState == EMSTATE_HALTED
2614 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2615 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2616 && ( enmNewState == EMSTATE_RAW
2617 || enmNewState == EMSTATE_HM
2618 || enmNewState == EMSTATE_NEM
2619 || enmNewState == EMSTATE_REM
2620 || enmNewState == EMSTATE_IEM_THEN_REM
2621 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2622 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2623 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2624 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2625 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2626 {
2627 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2628 {
2629 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2630 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2631 }
2632 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2633 {
2634 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2635 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2636 }
2637 }
2638 }
2639 else
2640 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2641
2642 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2643 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2644
2645 /*
2646 * Act on the new state.
2647 */
2648 switch (enmNewState)
2649 {
2650 /*
2651 * Execute raw.
2652 */
2653 case EMSTATE_RAW:
2654 AssertLogRelMsgFailed(("%Rrc\n", rc));
2655 rc = VERR_EM_INTERNAL_ERROR;
2656 break;
2657
2658 /*
2659 * Execute hardware accelerated raw.
2660 */
2661 case EMSTATE_HM:
2662 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2663 break;
2664
2665 /*
2666 * Execute using the native execution manager (NEM).
2667 */
2668 case EMSTATE_NEM:
2669 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2670 break;
2671
2672 /*
2673 * Execute recompiled.
2674 */
2675 case EMSTATE_REM:
2676 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2677 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2678 break;
2679
2680 /*
2681 * Execute in the interpreter.
2682 */
2683 case EMSTATE_IEM:
2684 {
2685 uint32_t cInstructions = 0;
2686#if 0 /* For testing purposes. */
2687 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2688 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2689 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2690 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2691 rc = VINF_SUCCESS;
2692 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2693#endif
2694 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2695 if (pVM->em.s.fIemExecutesAll)
2696 {
2697 Assert(rc != VINF_EM_RESCHEDULE_REM);
2698 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2699 Assert(rc != VINF_EM_RESCHEDULE_HM);
2700#ifdef VBOX_HIGH_RES_TIMERS_HACK
2701 if (cInstructions < 2048)
2702 TMTimerPollVoid(pVM, pVCpu);
2703#endif
2704 }
2705 fFFDone = false;
2706 break;
2707 }
2708
2709 /*
2710 * Execute in IEM, hoping we can quickly switch back to HM
2711 * or RAW execution. If our hopes fail, we go to REM.
2712 */
2713 case EMSTATE_IEM_THEN_REM:
2714 {
2715 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2716 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2717 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2718 break;
2719 }
2720
2721 /*
2722 * Application processor execution halted until SIPI.
2723 */
2724 case EMSTATE_WAIT_SIPI:
2725 /* no break */
2726 /*
2727 * hlt - execution halted until interrupt.
2728 */
2729 case EMSTATE_HALTED:
2730 {
2731 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2732 /* If HM (or someone else) stores a pending interrupt in
2733 TRPM, it must be dispatched ASAP without any halting.
2734 Anything pending in TRPM has been accepted and the CPU
2735 should already be in the right state to receive it. */
2736 if (TRPMHasTrap(pVCpu))
2737 rc = VINF_EM_RESCHEDULE;
2738 /* MWAIT has a special extension where it's woken up when
2739 an interrupt is pending even when IF=0. */
2740 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2741 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2742 {
2743 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2744 if (rc == VINF_SUCCESS)
2745 {
2746 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2747 APICUpdatePendingInterrupts(pVCpu);
2748
2749 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2750 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2751 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2752 {
2753 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2754 rc = VINF_EM_RESCHEDULE;
2755 }
2756 }
2757 }
2758 else
2759 {
2760 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2761 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2762 check VMCPU_FF_UPDATE_APIC here. */
2763 if ( rc == VINF_SUCCESS
2764 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2765 {
2766 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2767 rc = VINF_EM_RESCHEDULE;
2768 }
2769 }
2770
2771 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2772 break;
2773 }
2774
2775 /*
2776 * Suspended - return to VM.cpp.
2777 */
2778 case EMSTATE_SUSPENDED:
2779 TMR3NotifySuspend(pVM, pVCpu);
2780 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2781 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2782 return VINF_EM_SUSPEND;
2783
2784 /*
2785 * Debugging in the guest.
2786 */
2787 case EMSTATE_DEBUG_GUEST_RAW:
2788 case EMSTATE_DEBUG_GUEST_HM:
2789 case EMSTATE_DEBUG_GUEST_NEM:
2790 case EMSTATE_DEBUG_GUEST_IEM:
2791 case EMSTATE_DEBUG_GUEST_REM:
2792 TMR3NotifySuspend(pVM, pVCpu);
2793 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2794 TMR3NotifyResume(pVM, pVCpu);
2795 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2796 break;
2797
2798 /*
2799 * Debugging in the hypervisor.
2800 */
2801 case EMSTATE_DEBUG_HYPER:
2802 {
2803 TMR3NotifySuspend(pVM, pVCpu);
2804 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2805
2806 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2807 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2808 if (rc != VINF_SUCCESS)
2809 {
2810 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2811 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2812 else
2813 {
2814 /* switch to guru meditation mode */
2815 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2816 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2817 VMMR3FatalDump(pVM, pVCpu, rc);
2818 }
2819 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2820 return rc;
2821 }
2822
2823 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2824 TMR3NotifyResume(pVM, pVCpu);
2825 break;
2826 }
2827
2828 /*
2829 * Guru meditation takes place in the debugger.
2830 */
2831 case EMSTATE_GURU_MEDITATION:
2832 {
2833 TMR3NotifySuspend(pVM, pVCpu);
2834 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2835 VMMR3FatalDump(pVM, pVCpu, rc);
2836 emR3Debug(pVM, pVCpu, rc);
2837 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2838 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2839 return rc;
2840 }
2841
2842 /*
2843 * The states we don't expect here.
2844 */
2845 case EMSTATE_NONE:
2846 case EMSTATE_TERMINATING:
2847 default:
2848 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2849 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2850 TMR3NotifySuspend(pVM, pVCpu);
2851 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2852 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2853 return VERR_EM_INTERNAL_ERROR;
2854 }
2855 } /* The Outer Main Loop */
2856 }
2857 else
2858 {
2859 /*
2860 * Fatal error.
2861 */
2862 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2863 TMR3NotifySuspend(pVM, pVCpu);
2864 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2865 VMMR3FatalDump(pVM, pVCpu, rc);
2866 emR3Debug(pVM, pVCpu, rc);
2867 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2868 /** @todo change the VM state! */
2869 return rc;
2870 }
2871
2872 /* not reached */
2873}
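/* Illustrative sketch (hypothetical, simplified): the per-VCPU emulation
 * thread (EMT) is the caller of this entry point; conceptually it does
 * something along these lines, with the real EMT loop living in the VM
 * emulation-thread code:
 *
 * @code
 *     int rc = EMR3ExecuteVM(pVM, pVCpu);
 *     if (rc == VINF_EM_SUSPEND || rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
 *     {
 *         // Park the EMT and wait for the next resume/power-on or teardown request.
 *     }
 * @endcode
 */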
2874