VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@80386

Last change on this file since 80386 was 80363, checked in by vboxsync, 6 years ago

VMM/EM: Nested VMX: bugref:9180 Add assertion.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 121.7 KB
1/* $Id: EM.cpp 80363 2019-08-21 09:58:59Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
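/*
 * A simplified sketch of the outer loop in EMR3ExecuteVM() described above
 * (illustrative only; error handling, forced actions and the full set of
 * state transitions are elided):
 *
 *     for (;;)
 *     {
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_HM:  rc = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
 *             case EMSTATE_REM: rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             // ... other EMSTATE_XXX values: halted, debugging, guru meditation ...
 *         }
 *         // process rc, service forced actions, reschedule as needed
 *     }
 */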
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/vmm.h>
42#include <VBox/vmm/selm.h>
43#include <VBox/vmm/trpm.h>
44#include <VBox/vmm/iem.h>
45#include <VBox/vmm/nem.h>
46#include <VBox/vmm/iom.h>
47#include <VBox/vmm/dbgf.h>
48#include <VBox/vmm/pgm.h>
49#ifdef VBOX_WITH_REM
50# include <VBox/vmm/rem.h>
51#endif
52#include <VBox/vmm/apic.h>
53#include <VBox/vmm/tm.h>
54#include <VBox/vmm/mm.h>
55#include <VBox/vmm/ssm.h>
56#include <VBox/vmm/pdmapi.h>
57#include <VBox/vmm/pdmcritsect.h>
58#include <VBox/vmm/pdmqueue.h>
59#include <VBox/vmm/hm.h>
60#include "EMInternal.h"
61#include <VBox/vmm/vm.h>
62#include <VBox/vmm/uvm.h>
63#include <VBox/vmm/cpumdis.h>
64#include <VBox/dis.h>
65#include <VBox/disopcode.h>
66#include <VBox/err.h>
67#include "VMMTracing.h"
68
69#include <iprt/asm.h>
70#include <iprt/string.h>
71#include <iprt/stream.h>
72#include <iprt/thread.h>
73
74
75/*********************************************************************************************************************************
76* Internal Functions *
77*********************************************************************************************************************************/
78static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
79static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
80#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
81static const char *emR3GetStateName(EMSTATE enmState);
82#endif
83static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
84#if defined(VBOX_WITH_REM) || defined(DEBUG)
85static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
86#endif
87static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
88
89
90/**
91 * Initializes the EM.
92 *
93 * @returns VBox status code.
94 * @param pVM The cross context VM structure.
95 */
96VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
97{
98 LogFlow(("EMR3Init\n"));
99 /*
100 * Assert alignment and sizes.
101 */
102 AssertCompileMemberAlignment(VM, em.s, 32);
103 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
104 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
105 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
106
107 /*
108 * Init the structure.
109 */
110 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
111 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
112
113 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
114 AssertLogRelRCReturn(rc, rc);
115
116 bool fEnabled;
117 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
118 AssertLogRelRCReturn(rc, rc);
119 pVM->em.s.fGuruOnTripleFault = !fEnabled;
120 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
121 {
122 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
123 pVM->em.s.fGuruOnTripleFault = true;
124 }
125
126 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
127
128 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
129 * Whether to try to correlate exit history in any context, detect hot spots and
130 * try to optimize these using IEM if there are other exits close by. This
131 * overrides the context-specific settings. */
132 bool fExitOptimizationEnabled = true;
133 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
134 AssertLogRelRCReturn(rc, rc);
135
136 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
137 * Whether to optimize exits in ring-0. Setting this to false will also disable
138 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
139 * capabilities of the host kernel, this optimization may be unavailable. */
140 bool fExitOptimizationEnabledR0 = true;
141 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
142 AssertLogRelRCReturn(rc, rc);
143 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
144
145 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
146 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
147 * hooks are in effect). */
148 /** @todo change the default to true here */
149 bool fExitOptimizationEnabledR0PreemptDisabled = true;
150 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
151 AssertLogRelRCReturn(rc, rc);
152 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
153
154 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
155 * Maximum number of instructions to let EMHistoryExec execute in one go. */
156 uint16_t cHistoryExecMaxInstructions = 8192;
157 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
158 AssertLogRelRCReturn(rc, rc);
159 if (cHistoryExecMaxInstructions < 16)
160 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
161
162 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
163 * Maximum number of instructions between exits during probing. */
164 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
165#ifdef RT_OS_WINDOWS
166 if (VM_IS_NEM_ENABLED(pVM))
167 cHistoryProbeMaxInstructionsWithoutExit = 32;
168#endif
169 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
170 cHistoryProbeMaxInstructionsWithoutExit);
171 AssertLogRelRCReturn(rc, rc);
172 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
173 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
174 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 16");
175
176 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
177 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
178 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
179 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
180 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
181 cHistoryProbeMinInstructions);
182 AssertLogRelRCReturn(rc, rc);
183
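/*
 * Illustrative note: the /EM/* keys queried above are ordinary CFGM values,
 * so they can be supplied from the host configuration, e.g. as extra data
 * ("VBoxInternal/EM/TripleFaultReset" via VBoxManage setextradata), or
 * programmatically before EMR3Init() runs, roughly along these lines
 * (assuming the "EM" node already exists; otherwise create it first with
 * CFGMR3InsertNode):
 *
 *     PCFGMNODE pCfgEm = CFGMR3GetChild(CFGMR3GetRoot(pVM), "EM");
 *     CFGMR3InsertInteger(pCfgEm, "HistoryExecMaxInstructions", 4096);
 *
 * Boolean keys are stored as 0/1 integer values in CFGM.
 */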
184 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
185 {
186 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
187 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
188 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
189 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
190 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
191 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
192 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
193 }
194
195#ifdef VBOX_WITH_REM
196 /*
197 * Initialize the REM critical section.
198 */
199 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
200 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
201 AssertRCReturn(rc, rc);
202#endif
203
204 /*
205 * Saved state.
206 */
207 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
208 NULL, NULL, NULL,
209 NULL, emR3Save, NULL,
210 NULL, emR3Load, NULL);
211 if (RT_FAILURE(rc))
212 return rc;
213
214 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
215 {
216 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
217
218 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
219 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
220 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
221 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
222
223# define EM_REG_COUNTER(a, b, c) \
224 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
225 AssertRC(rc);
226
227# define EM_REG_COUNTER_USED(a, b, c) \
228 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
229 AssertRC(rc);
230
231# define EM_REG_PROFILE(a, b, c) \
232 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
233 AssertRC(rc);
234
235# define EM_REG_PROFILE_ADV(a, b, c) \
236 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
237 AssertRC(rc);
238
239 /*
240 * Statistics.
241 */
242#ifdef VBOX_WITH_STATISTICS
243 PEMSTATS pStats;
244 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
245 if (RT_FAILURE(rc))
246 return rc;
247
248 pVCpu->em.s.pStatsR3 = pStats;
249 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
250
251# if 1 /* rawmode only? */
252 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
253 EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions end to IEM in ring-3.");
254 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%u/R3/PrivInst/Cli", "Number of cli instructions.");
255 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%u/R3/PrivInst/Sti", "Number of sli instructions.");
256 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%u/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
257 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%u/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
258 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%u/R3/PrivInst/Misc", "Number of misc. instructions.");
259 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%u/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
260 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%u/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
261 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%u/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
262 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%u/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
263 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%u/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
264 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%u/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
265 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%u/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
266 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%u/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
267 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%u/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
268 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%u/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
269 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%u/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
270 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%u/R3/PrivInst/Iret", "Number of iret instructions.");
271 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%u/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
272 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%u/R3/PrivInst/Lidt", "Number of lidt instructions.");
273 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%u/R3/PrivInst/Lldt", "Number of lldt instructions.");
274 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%u/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
275 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%u/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
276 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%u/R3/PrivInst/Syscall", "Number of syscall instructions.");
277 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%u/R3/PrivInst/Sysret", "Number of sysret instructions.");
278 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%u/Cli/Total", "Total number of cli instructions executed.");
279#endif
280 pVCpu->em.s.pCliStatTree = 0;
281
282 /* these should be considered for release statistics. */
283 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
284 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
285 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
286 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
287 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times enmR3HMExecute is called.");
288 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
289 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
290 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
291#endif /* VBOX_WITH_STATISTICS */
292 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
293 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times enmR3NEMExecute is called.");
294#ifdef VBOX_WITH_STATISTICS
295 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%u/EM/REMEmuSingle", "Profiling single instruction REM execution.");
296 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
297 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%u/EM/REMSync", "Profiling REM context syncing.");
298 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%u/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
299 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%u/EM/RAWExec", "Profiling Raw Mode execution.");
300 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%u/EM/RAWTail", "Profiling Raw Mode tail overhead.");
301#endif /* VBOX_WITH_STATISTICS */
302
303 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
304 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
305 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
306 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
307 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%u/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
308
309 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
310
311 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
312 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
313 AssertRC(rc);
314
315 /* History record statistics */
316 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
317 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
318 AssertRC(rc);
319
320 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
321 {
322 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
323 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
324 AssertRC(rc);
325 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
326 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
327 AssertRC(rc);
328 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
329 "Number of replacments at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", idCpu, iStep);
330 AssertRC(rc);
331 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
332 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
333 AssertRC(rc);
334 }
335
336 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
337 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
338 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
339 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
340 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
341 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
342 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
343 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
344 }
345
346 emR3InitDbg(pVM);
347 return VINF_SUCCESS;
348}
349
350
351/**
352 * Called when a VM initialization stage is completed.
353 *
354 * @returns VBox status code.
355 * @param pVM The cross context VM structure.
356 * @param enmWhat The initialization state that was completed.
357 */
358VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
359{
360 if (enmWhat == VMINITCOMPLETED_RING0)
361 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
362 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
363 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
364 return VINF_SUCCESS;
365}
366
367
368/**
369 * Applies relocations to data and code managed by this
370 * component. This function will be called at init and
371 * whenever the VMM needs to relocate itself inside the GC.
372 *
373 * @param pVM The cross context VM structure.
374 */
375VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
376{
377 LogFlow(("EMR3Relocate\n"));
378 RT_NOREF(pVM);
379}
380
381
382/**
383 * Reset the EM state for a CPU.
384 *
385 * Called by EMR3Reset and hot plugging.
386 *
387 * @param pVCpu The cross context virtual CPU structure.
388 */
389VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
390{
391 /* Reset scheduling state. */
392 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
393
394 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
395 out of the HALTED state here so that enmPrevState doesn't end up as
396 HALTED when EMR3Execute returns. */
397 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
398 {
399 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
400 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
401 }
402}
403
404
405/**
406 * Reset notification.
407 *
408 * @param pVM The cross context VM structure.
409 */
410VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
411{
412 Log(("EMR3Reset: \n"));
413 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
414 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
415}
416
417
418/**
419 * Terminates the EM.
420 *
421 * Termination means cleaning up and freeing all resources;
422 * the VM itself is at this point powered off or suspended.
423 *
424 * @returns VBox status code.
425 * @param pVM The cross context VM structure.
426 */
427VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
428{
429#ifdef VBOX_WITH_REM
430 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
431#else
432 RT_NOREF(pVM);
433#endif
434 return VINF_SUCCESS;
435}
436
437
438/**
439 * Execute state save operation.
440 *
441 * @returns VBox status code.
442 * @param pVM The cross context VM structure.
443 * @param pSSM SSM operation handle.
444 */
445static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
446{
447 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
448 {
449 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
450
451 SSMR3PutBool(pSSM, false /*fForceRAW*/);
452
453 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
454 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
455 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
456
457 /* Save mwait state. */
458 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
459 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
460 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
461 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
462 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
463 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
464 AssertRCReturn(rc, rc);
465 }
466 return VINF_SUCCESS;
467}
468
469
470/**
471 * Execute state load operation.
472 *
473 * @returns VBox status code.
474 * @param pVM The cross context VM structure.
475 * @param pSSM SSM operation handle.
476 * @param uVersion Data layout version.
477 * @param uPass The data pass.
478 */
479static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
480{
481 /*
482 * Validate version.
483 */
484 if ( uVersion > EM_SAVED_STATE_VERSION
485 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
486 {
487 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
488 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
489 }
490 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
491
492 /*
493 * Load the saved state.
494 */
495 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
496 {
497 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
498
499 bool fForceRAWIgnored;
500 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
501 AssertRCReturn(rc, rc);
502
503 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
504 {
505 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
506 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
507 AssertRCReturn(rc, rc);
508 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
509
510 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
511 }
512 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
513 {
514 /* Load mwait state. */
515 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
516 AssertRCReturn(rc, rc);
517 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
518 AssertRCReturn(rc, rc);
519 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
520 AssertRCReturn(rc, rc);
521 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
522 AssertRCReturn(rc, rc);
523 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
524 AssertRCReturn(rc, rc);
525 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
526 AssertRCReturn(rc, rc);
527 }
528
529 Assert(!pVCpu->em.s.pCliStatTree);
530 }
531 return VINF_SUCCESS;
532}
533
534
535/**
536 * Argument packet for emR3SetExecutionPolicy.
537 */
538struct EMR3SETEXECPOLICYARGS
539{
540 EMEXECPOLICY enmPolicy;
541 bool fEnforce;
542};
543
544
545/**
546 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
547 */
548static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
549{
550 /*
551 * Only the first CPU changes the variables.
552 */
553 if (pVCpu->idCpu == 0)
554 {
555 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
556 switch (pArgs->enmPolicy)
557 {
558 case EMEXECPOLICY_RECOMPILE_RING0:
559 case EMEXECPOLICY_RECOMPILE_RING3:
560 break;
561 case EMEXECPOLICY_IEM_ALL:
562 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
563 break;
564 default:
565 AssertFailedReturn(VERR_INVALID_PARAMETER);
566 }
567 Log(("EM: Set execution policy (fIemExecutesAll=%RTbool)\n", pVM->em.s.fIemExecutesAll));
568 }
569
570 /*
571 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
572 */
573 return pVCpu->em.s.enmState == EMSTATE_RAW
574 || pVCpu->em.s.enmState == EMSTATE_HM
575 || pVCpu->em.s.enmState == EMSTATE_NEM
576 || pVCpu->em.s.enmState == EMSTATE_IEM
577 || pVCpu->em.s.enmState == EMSTATE_REM
578 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
579 ? VINF_EM_RESCHEDULE
580 : VINF_SUCCESS;
581}
582
583
584/**
585 * Changes an execution scheduling policy parameter.
586 *
587 * This is used to enable or disable raw-mode / hardware-virtualization
588 * execution of user and supervisor code.
589 *
590 * @returns VINF_SUCCESS on success.
591 * @returns VINF_EM_RESCHEDULE if rescheduling might be required.
592 * @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
593 *
594 * @param pUVM The user mode VM handle.
595 * @param enmPolicy The scheduling policy to change.
596 * @param fEnforce Whether to enforce the policy or not.
597 */
598VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
599{
600 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
601 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
602 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
603
604 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
605 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
606}
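/*
 * Illustrative usage sketch (assuming a front-end holding the user mode VM
 * handle): forcing all guest code through IEM could look like
 *
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *     AssertLogRelRC(rc);
 *
 * The rendezvous callback above then updates fIemExecutesAll on EMT(0) and
 * requests a reschedule on any EMT currently in RAW/HM/NEM/IEM/REM state.
 */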
607
608
609/**
610 * Queries an execution scheduling policy parameter.
611 *
612 * @returns VBox status code
613 * @param pUVM The user mode VM handle.
614 * @param enmPolicy The scheduling policy to query.
615 * @param pfEnforced Where to return the current value.
616 */
617VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
618{
619 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
620 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
621 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
622 PVM pVM = pUVM->pVM;
623 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
624
625 /* No need to bother EMTs with a query. */
626 switch (enmPolicy)
627 {
628 case EMEXECPOLICY_RECOMPILE_RING0:
629 case EMEXECPOLICY_RECOMPILE_RING3:
630 *pfEnforced = false;
631 break;
632 case EMEXECPOLICY_IEM_ALL:
633 *pfEnforced = pVM->em.s.fIemExecutesAll;
634 break;
635 default:
636 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
637 }
638
639 return VINF_SUCCESS;
640}
641
642
643/**
644 * Queries the main execution engine of the VM.
645 *
646 * @returns VBox status code
647 * @param pUVM The user mode VM handle.
648 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
649 */
650VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
651{
652 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
653 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
654
655 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
656 PVM pVM = pUVM->pVM;
657 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
658
659 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
660 return VINF_SUCCESS;
661}
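/*
 * Illustrative usage sketch: a front-end can query the selected engine and
 * log it; VM_EXEC_ENGINE_NATIVE_API is assumed to be one of the
 * VM_EXEC_ENGINE_XXX values mentioned above.
 *
 *     uint8_t bEngine = VM_EXEC_ENGINE_NOT_SET;
 *     if (   RT_SUCCESS(EMR3QueryMainExecutionEngine(pUVM, &bEngine))
 *         && bEngine == VM_EXEC_ENGINE_NATIVE_API)
 *         LogRel(("Main execution engine: native API (NEM)\n"));
 */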
662
663
664/**
665 * Raise a fatal error.
666 *
667 * Safely terminate the VM with full state report and stuff. This function
668 * will naturally never return.
669 *
670 * @param pVCpu The cross context virtual CPU structure.
671 * @param rc VBox status code.
672 */
673VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
674{
675 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
676 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
677}
678
679
680#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
681/**
682 * Gets the EM state name.
683 *
684 * @returns Pointer to a read-only state name.
685 * @param enmState The state.
686 */
687static const char *emR3GetStateName(EMSTATE enmState)
688{
689 switch (enmState)
690 {
691 case EMSTATE_NONE: return "EMSTATE_NONE";
692 case EMSTATE_RAW: return "EMSTATE_RAW";
693 case EMSTATE_HM: return "EMSTATE_HM";
694 case EMSTATE_IEM: return "EMSTATE_IEM";
695 case EMSTATE_REM: return "EMSTATE_REM";
696 case EMSTATE_HALTED: return "EMSTATE_HALTED";
697 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
698 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
699 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
700 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
701 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
702 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
703 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
704 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
705 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
706 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
707 case EMSTATE_NEM: return "EMSTATE_NEM";
708 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
709 default: return "Unknown!";
710 }
711}
712#endif /* LOG_ENABLED || VBOX_STRICT */
713
714
715/**
716 * Handle pending ring-3 I/O port write.
717 *
718 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
719 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
720 *
721 * @returns Strict VBox status code.
722 * @param pVM The cross context VM structure.
723 * @param pVCpu The cross context virtual CPU structure.
724 */
725VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
726{
727 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
728
729 /* Get and clear the pending data. */
730 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
731 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
732 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
733 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
734 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
735
736 /* Assert sanity. */
737 switch (cbValue)
738 {
739 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
740 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
741 case 4: break;
742 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
743 }
744 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
745
746 /* Do the work.*/
747 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
748 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
749 if (IOM_SUCCESS(rcStrict))
750 {
751 pVCpu->cpum.GstCtx.rip += cbInstr;
752 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
753 }
754 return rcStrict;
755}
756
757
758/**
759 * Handle pending ring-3 I/O port read.
760 *
761 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
762 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
763 *
764 * @returns Strict VBox status code.
765 * @param pVM The cross context VM structure.
766 * @param pVCpu The cross context virtual CPU structure.
767 */
768VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
769{
770 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
771
772 /* Get and clear the pending data. */
773 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
774 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
775 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
776 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
777
778 /* Assert sanity. */
779 switch (cbValue)
780 {
781 case 1: break;
782 case 2: break;
783 case 4: break;
784 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
785 }
786 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
787 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
788
789 /* Do the work.*/
790 uint32_t uValue = 0;
791 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
792 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
793 if (IOM_SUCCESS(rcStrict))
794 {
795 if (cbValue == 4)
796 pVCpu->cpum.GstCtx.rax = uValue;
797 else if (cbValue == 2)
798 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
799 else
800 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
801 pVCpu->cpum.GstCtx.rip += cbInstr;
802 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
803 }
804 return rcStrict;
805}
806
807
808/**
809 * Debug loop.
810 *
811 * @returns VBox status code for EM.
812 * @param pVM The cross context VM structure.
813 * @param pVCpu The cross context virtual CPU structure.
814 * @param rc Current EM VBox status code.
815 */
816static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
817{
818 for (;;)
819 {
820 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
821 const VBOXSTRICTRC rcLast = rc;
822
823 /*
824 * Debug related RC.
825 */
826 switch (VBOXSTRICTRC_VAL(rc))
827 {
828 /*
829 * Single step an instruction.
830 */
831 case VINF_EM_DBG_STEP:
832 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
833 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
834 AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
835 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
836 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
837 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
838 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
839#ifdef VBOX_WITH_REM
840 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
841 rc = emR3RemStep(pVM, pVCpu);
842#endif
843 else
844 {
845 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
846 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
847 rc = VINF_EM_DBG_STEPPED;
848 }
849 break;
850
851 /*
852 * Simple events: stepped, breakpoint, stop/assertion.
853 */
854 case VINF_EM_DBG_STEPPED:
855 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
856 break;
857
858 case VINF_EM_DBG_BREAKPOINT:
859 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
860 break;
861
862 case VINF_EM_DBG_STOP:
863 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
864 break;
865
866 case VINF_EM_DBG_EVENT:
867 rc = DBGFR3EventHandlePending(pVM, pVCpu);
868 break;
869
870 case VINF_EM_DBG_HYPER_STEPPED:
871 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
872 break;
873
874 case VINF_EM_DBG_HYPER_BREAKPOINT:
875 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
876 break;
877
878 case VINF_EM_DBG_HYPER_ASSERTION:
879 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
880 RTLogFlush(NULL);
881 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
882 break;
883
884 /*
885 * Guru meditation.
886 */
887 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
888 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
889 break;
890 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
891 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
892 break;
893 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
894 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
895 break;
896
897 default: /** @todo don't use default for guru, but make special errors code! */
898 {
899 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
900 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
901 break;
902 }
903 }
904
905 /*
906 * Process the result.
907 */
908 switch (VBOXSTRICTRC_VAL(rc))
909 {
910 /*
911 * Continue the debugging loop.
912 */
913 case VINF_EM_DBG_STEP:
914 case VINF_EM_DBG_STOP:
915 case VINF_EM_DBG_EVENT:
916 case VINF_EM_DBG_STEPPED:
917 case VINF_EM_DBG_BREAKPOINT:
918 case VINF_EM_DBG_HYPER_STEPPED:
919 case VINF_EM_DBG_HYPER_BREAKPOINT:
920 case VINF_EM_DBG_HYPER_ASSERTION:
921 break;
922
923 /*
924 * Resuming execution (in some form) has to be done here if we got
925 * a hypervisor debug event.
926 */
927 case VINF_SUCCESS:
928 case VINF_EM_RESUME:
929 case VINF_EM_SUSPEND:
930 case VINF_EM_RESCHEDULE:
931 case VINF_EM_RESCHEDULE_RAW:
932 case VINF_EM_RESCHEDULE_REM:
933 case VINF_EM_HALT:
934 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
935 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
936 if (rc == VINF_SUCCESS)
937 rc = VINF_EM_RESCHEDULE;
938 return rc;
939
940 /*
941 * The debugger isn't attached.
942 * We'll simply turn the thing off since that's the easiest thing to do.
943 */
944 case VERR_DBGF_NOT_ATTACHED:
945 switch (VBOXSTRICTRC_VAL(rcLast))
946 {
947 case VINF_EM_DBG_HYPER_STEPPED:
948 case VINF_EM_DBG_HYPER_BREAKPOINT:
949 case VINF_EM_DBG_HYPER_ASSERTION:
950 case VERR_TRPM_PANIC:
951 case VERR_TRPM_DONT_PANIC:
952 case VERR_VMM_RING0_ASSERTION:
953 case VERR_VMM_HYPER_CR3_MISMATCH:
954 case VERR_VMM_RING3_CALL_DISABLED:
955 return rcLast;
956 }
957 return VINF_EM_OFF;
958
959 /*
960 * Status codes terminating the VM in one or another sense.
961 */
962 case VINF_EM_TERMINATE:
963 case VINF_EM_OFF:
964 case VINF_EM_RESET:
965 case VINF_EM_NO_MEMORY:
966 case VINF_EM_RAW_STALE_SELECTOR:
967 case VINF_EM_RAW_IRET_TRAP:
968 case VERR_TRPM_PANIC:
969 case VERR_TRPM_DONT_PANIC:
970 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
971 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
972 case VERR_VMM_RING0_ASSERTION:
973 case VERR_VMM_HYPER_CR3_MISMATCH:
974 case VERR_VMM_RING3_CALL_DISABLED:
975 case VERR_INTERNAL_ERROR:
976 case VERR_INTERNAL_ERROR_2:
977 case VERR_INTERNAL_ERROR_3:
978 case VERR_INTERNAL_ERROR_4:
979 case VERR_INTERNAL_ERROR_5:
980 case VERR_IPE_UNEXPECTED_STATUS:
981 case VERR_IPE_UNEXPECTED_INFO_STATUS:
982 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
983 return rc;
984
985 /*
986 * The rest is unexpected, and will keep us here.
987 */
988 default:
989 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
990 break;
991 }
992 } /* debug for ever */
993}
994
995
996#if defined(VBOX_WITH_REM) || defined(DEBUG)
997/**
998 * Steps recompiled code.
999 *
1000 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1001 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1002 *
1003 * @param pVM The cross context VM structure.
1004 * @param pVCpu The cross context virtual CPU structure.
1005 */
1006static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1007{
1008 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1009
1010# ifdef VBOX_WITH_REM
1011 EMRemLock(pVM);
1012
1013 /*
1014 * Switch to REM, step instruction, switch back.
1015 */
1016 int rc = REMR3State(pVM, pVCpu);
1017 if (RT_SUCCESS(rc))
1018 {
1019 rc = REMR3Step(pVM, pVCpu);
1020 REMR3StateBack(pVM, pVCpu);
1021 }
1022 EMRemUnlock(pVM);
1023
1024# else
1025 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1026# endif
1027
1028 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1029 return rc;
1030}
1031#endif /* VBOX_WITH_REM || DEBUG */
1032
1033
1034#ifdef VBOX_WITH_REM
1035/**
1036 * emR3RemExecute helper that syncs the state back from REM and leave the REM
1037 * critical section.
1038 *
1039 * @returns false - new fInREMState value.
1040 * @param pVM The cross context VM structure.
1041 * @param pVCpu The cross context virtual CPU structure.
1042 */
1043DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1044{
1045 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1046 REMR3StateBack(pVM, pVCpu);
1047 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1048
1049 EMRemUnlock(pVM);
1050 return false;
1051}
1052#endif
1053
1054
1055/**
1056 * Executes recompiled code.
1057 *
1058 * This function contains the recompiler version of the inner
1059 * execution loop (the outer loop being in EMR3ExecuteVM()).
1060 *
1061 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1062 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1063 *
1064 * @param pVM The cross context VM structure.
1065 * @param pVCpu The cross context virtual CPU structure.
1066 * @param pfFFDone Where to store an indicator telling whether or not
1067 * FFs were done before returning.
1068 *
1069 */
1070static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1071{
1072#ifdef LOG_ENABLED
1073 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1074
1075 if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1076 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1077 else
1078 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1079#endif
1080 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1081
1082#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1083 AssertMsg( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1084 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1085 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1086#endif
1087
1088 /*
1089 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1090 * or the REM suggests raw-mode execution.
1091 */
1092 *pfFFDone = false;
1093#ifdef VBOX_WITH_REM
1094 bool fInREMState = false;
1095#else
1096 uint32_t cLoops = 0;
1097#endif
1098 int rc = VINF_SUCCESS;
1099 for (;;)
1100 {
1101#ifdef VBOX_WITH_REM
1102 /*
1103 * Lock REM and update the state if not already in sync.
1104 *
1105 * Note! Big lock, but you are not supposed to own any lock when
1106 * coming in here.
1107 */
1108 if (!fInREMState)
1109 {
1110 EMRemLock(pVM);
1111 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1112
1113 /* Flush the recompiler translation blocks if the VCPU has changed,
1114 also force a full CPU state resync. */
1115 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1116 {
1117 REMFlushTBs(pVM);
1118 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1119 }
1120 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1121
1122 rc = REMR3State(pVM, pVCpu);
1123
1124 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1125 if (RT_FAILURE(rc))
1126 break;
1127 fInREMState = true;
1128
1129 /*
1130 * We might have missed the raising of VMREQ, TIMER and some other
1131 * important FFs while we were busy switching the state. So, check again.
1132 */
1133 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1134 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1135 {
1136 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1137 goto l_REMDoForcedActions;
1138 }
1139 }
1140#endif
1141
1142 /*
1143 * Execute REM.
1144 */
1145 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1146 {
1147 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1148#ifdef VBOX_WITH_REM
1149 rc = REMR3Run(pVM, pVCpu);
1150#else
1151 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/));
1152#endif
1153 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1154 }
1155 else
1156 {
1157 /* Give up this time slice; virtual time continues */
1158 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1159 RTThreadSleep(5);
1160 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1161 rc = VINF_SUCCESS;
1162 }
1163
1164 /*
1165 * Deal with high priority post execution FFs before doing anything
1166 * else. Sync back the state and leave the lock to be on the safe side.
1167 */
1168 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1169 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1170 {
1171#ifdef VBOX_WITH_REM
1172 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1173#endif
1174 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1175 }
1176
1177 /*
1178 * Process the returned status code.
1179 */
1180 if (rc != VINF_SUCCESS)
1181 {
1182 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1183 break;
1184 if (rc != VINF_REM_INTERRUPED_FF)
1185 {
1186#ifndef VBOX_WITH_REM
1187 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1188 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1189 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1190 {
1191 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1192 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1193 {
1194 rc = VINF_EM_RESCHEDULE;
1195 break;
1196 }
1197 }
1198#endif
1199
1200 /*
1201 * Anything which is not known to us means an internal error
1202 * and the termination of the VM!
1203 */
1204 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1205 break;
1206 }
1207 }
1208
1209
1210 /*
1211 * Check and execute forced actions.
1212 *
1213 * Sync back the VM state and leave the lock before calling any of
1214 * these, you never know what's going to happen here.
1215 */
1216#ifdef VBOX_HIGH_RES_TIMERS_HACK
1217 TMTimerPollVoid(pVM, pVCpu);
1218#endif
1219 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1220 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1221 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1222 {
1223#ifdef VBOX_WITH_REM
1224l_REMDoForcedActions:
1225 if (fInREMState)
1226 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1227#endif
1228 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1229 rc = emR3ForcedActions(pVM, pVCpu, rc);
1230 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1231 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1232 if ( rc != VINF_SUCCESS
1233 && rc != VINF_EM_RESCHEDULE_REM)
1234 {
1235 *pfFFDone = true;
1236 break;
1237 }
1238 }
1239
1240#ifndef VBOX_WITH_REM
1241 /*
1242 * Have to check if we can get back to fast execution mode every so often.
1243 */
1244 if (!(++cLoops & 7))
1245 {
1246 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
1247 if ( enmCheck != EMSTATE_REM
1248 && enmCheck != EMSTATE_IEM_THEN_REM)
1249 return VINF_EM_RESCHEDULE;
1250 }
1251#endif
1252
1253 } /* The Inner Loop, recompiled execution mode version. */
1254
1255
1256#ifdef VBOX_WITH_REM
1257 /*
1258 * Returning. Sync back the VM state if required.
1259 */
1260 if (fInREMState)
1261 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1262#endif
1263
1264 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1265 return rc;
1266}
1267
1268
1269#ifdef DEBUG
1270
1271int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1272{
1273 EMSTATE enmOldState = pVCpu->em.s.enmState;
1274
1275 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1276
1277 Log(("Single step BEGIN:\n"));
1278 for (uint32_t i = 0; i < cIterations; i++)
1279 {
1280 DBGFR3PrgStep(pVCpu);
1281 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1282 emR3RemStep(pVM, pVCpu);
1283 if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
1284 break;
1285 }
1286 Log(("Single step END:\n"));
1287 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1288 pVCpu->em.s.enmState = enmOldState;
1289 return VINF_EM_RESCHEDULE;
1290}
1291
1292#endif /* DEBUG */
1293
1294
1295/**
1296 * Try to execute the problematic code in IEM first, then fall back on REM if there
1297 * is too much of it or if IEM doesn't implement something.
1298 *
1299 * @returns Strict VBox status code from IEMExecLots.
1300 * @param pVM The cross context VM structure.
1301 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1302 * @param pfFFDone Force flags done indicator.
1303 *
1304 * @thread EMT(pVCpu)
1305 */
1306static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1307{
1308 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1309 *pfFFDone = false;
1310
1311 /*
1312 * Execute in IEM for a while.
1313 */
1314 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1315 {
1316 uint32_t cInstructions;
1317 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 1024 - pVCpu->em.s.cIemThenRemInstructions /*cMaxInstructions*/,
1318 UINT32_MAX/2 /*cPollRate*/, &cInstructions);
1319 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1320 if (rcStrict != VINF_SUCCESS)
1321 {
1322 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1323 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1324 break;
1325
1326 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1327 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1328 return rcStrict;
1329 }
1330
1331 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1332 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1333 {
1334 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1335 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1336 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1337 pVCpu->em.s.enmState = enmNewState;
1338 return VINF_SUCCESS;
1339 }
1340
1341 /*
1342 * Check for pending actions.
1343 */
1344 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1345 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1346 return VINF_SUCCESS;
1347 }
1348
1349 /*
1350 * Switch to REM.
1351 */
1352 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1353 pVCpu->em.s.enmState = EMSTATE_REM;
1354 return VINF_SUCCESS;
1355}
1356
1357
1358/**
1359 * Decides whether to execute RAW, HM, NEM, IEM or REM.
1360 *
1361 * @returns new EM state
1362 * @param pVM The cross context VM structure.
1363 * @param pVCpu The cross context virtual CPU structure.
1364 */
1365EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1366{
1367 /*
1368 * We stay in the wait for SIPI state unless explicitly told otherwise.
1369 */
1370 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1371 return EMSTATE_WAIT_SIPI;
1372
1373 /*
1374 * Execute everything in IEM?
1375 */
1376 if (pVM->em.s.fIemExecutesAll)
1377 return EMSTATE_IEM;
1378
1379 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1380 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1381 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1382
1383 X86EFLAGS EFlags = pVCpu->cpum.GstCtx.eflags;
1384 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1385 {
1386 if (VM_IS_HM_ENABLED(pVM))
1387 {
1388 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1389 return EMSTATE_HM;
1390 }
1391 else if (NEMR3CanExecuteGuest(pVM, pVCpu))
1392 return EMSTATE_NEM;
1393
1394 /*
1395 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1396 * turns off monitoring features essential for raw mode!
1397 */
1398 return EMSTATE_IEM_THEN_REM;
1399 }
1400
1401 /*
1402 * Standard raw-mode:
1403 *
1404 * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
1405 * or 32 bits protected mode ring 0 code
1406 *
1407 * The tests are ordered by the likelihood of being true during normal execution.
1408 */
1409 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1410 {
1411 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1412 return EMSTATE_REM;
1413 }
1414
1415# ifndef VBOX_RAW_V86
1416 if (EFlags.u32 & X86_EFL_VM) {
1417 Log2(("raw mode refused: VM_MASK\n"));
1418 return EMSTATE_REM;
1419 }
1420# endif
1421
1422 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1423 uint32_t u32CR0 = pVCpu->cpum.GstCtx.cr0;
1424 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1425 {
1426 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1427 return EMSTATE_REM;
1428 }
1429
1430 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
1431 {
1432 uint32_t u32Dummy, u32Features;
1433
1434 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1435 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1436 return EMSTATE_REM;
1437 }
1438
1439 unsigned uSS = pVCpu->cpum.GstCtx.ss.Sel;
1440 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
1441 || (uSS & X86_SEL_RPL) == 3)
1442 {
1443 if (!(EFlags.u32 & X86_EFL_IF))
1444 {
1445 Log2(("raw mode refused: IF (RawR3)\n"));
1446 return EMSTATE_REM;
1447 }
1448
1449 if (!(u32CR0 & X86_CR0_WP))
1450 {
1451 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1452 return EMSTATE_REM;
1453 }
1454 }
1455 else
1456 {
1457 /* Only ring 0 supervisor code. */
1458 if ((uSS & X86_SEL_RPL) != 0)
1459 {
1460 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1461 return EMSTATE_REM;
1462 }
1463
1464 // Let's start with pure 32 bits ring 0 code first
1465 /** @todo What's pure 32-bit mode? flat? */
1466 if ( !(pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1467 || !(pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
1468 {
1469 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1470 return EMSTATE_REM;
1471 }
1472
1473 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1474 if (!(u32CR0 & X86_CR0_WP))
1475 {
1476 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1477 return EMSTATE_REM;
1478 }
1479
1480# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1481 if (!(EFlags.u32 & X86_EFL_IF))
1482 {
1483 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1484 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1485 return EMSTATE_REM;
1486 }
1487# endif
1488
1489# ifndef VBOX_WITH_RAW_RING1
1490 /** @todo still necessary??? */
1491 if (EFlags.Bits.u2IOPL != 0)
1492 {
1493 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1494 return EMSTATE_REM;
1495 }
1496# endif
1497 }
1498
1499 /*
1500 * Stale hidden selectors means raw-mode is unsafe (being very careful).
1501 */
1502 if (pVCpu->cpum.GstCtx.cs.fFlags & CPUMSELREG_FLAGS_STALE)
1503 {
1504 Log2(("raw mode refused: stale CS\n"));
1505 return EMSTATE_REM;
1506 }
1507 if (pVCpu->cpum.GstCtx.ss.fFlags & CPUMSELREG_FLAGS_STALE)
1508 {
1509 Log2(("raw mode refused: stale SS\n"));
1510 return EMSTATE_REM;
1511 }
1512 if (pVCpu->cpum.GstCtx.ds.fFlags & CPUMSELREG_FLAGS_STALE)
1513 {
1514 Log2(("raw mode refused: stale DS\n"));
1515 return EMSTATE_REM;
1516 }
1517 if (pVCpu->cpum.GstCtx.es.fFlags & CPUMSELREG_FLAGS_STALE)
1518 {
1519 Log2(("raw mode refused: stale ES\n"));
1520 return EMSTATE_REM;
1521 }
1522 if (pVCpu->cpum.GstCtx.fs.fFlags & CPUMSELREG_FLAGS_STALE)
1523 {
1524 Log2(("raw mode refused: stale FS\n"));
1525 return EMSTATE_REM;
1526 }
1527 if (pVCpu->cpum.GstCtx.gs.fFlags & CPUMSELREG_FLAGS_STALE)
1528 {
1529 Log2(("raw mode refused: stale GS\n"));
1530 return EMSTATE_REM;
1531 }
1532
1533# ifdef VBOX_WITH_SAFE_STR
1534 if (pVCpu->cpum.GstCtx.tr.Sel == 0)
1535 {
1536 Log(("Raw mode refused -> TR=0\n"));
1537 return EMSTATE_REM;
1538 }
1539# endif
1540
1541 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1542 return EMSTATE_RAW;
1543}
1544
1545
1546/**
1547 * Executes all high priority post execution force actions.
1548 *
1549 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1550 * fatal error status code.
1551 *
1552 * @param pVM The cross context VM structure.
1553 * @param pVCpu The cross context virtual CPU structure.
1554 * @param rc The current strict VBox status code rc.
1555 */
1556VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1557{
1558 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1559
1560 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1561 PDMCritSectBothFF(pVCpu);
1562
1563 /* Update CR3 (Nested Paging case for HM). */
1564 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1565 {
1566 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1567 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1568 if (RT_FAILURE(rc2))
1569 return rc2;
1570 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1571 }
1572
1573 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1574 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1575 {
1576 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1577 if (CPUMIsGuestInPAEMode(pVCpu))
1578 {
1579 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1580 AssertPtr(pPdpes);
1581
1582 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1583 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1584 }
1585 else
1586 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1587 }
1588
1589 /* IEM has pending work (typically memory write after INS instruction). */
1590 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1591 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1592
1593 /* IOM has pending work (committing an I/O or MMIO write). */
1594 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1595 {
1596 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
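 /* Informal note: if an exit-history record was pending when the commit got
    postponed (idxContinueExitRec is valid), a plain VINF_SUCCESS is turned
    into VINF_EM_RESUME_R3_HISTORY_EXEC so the caller resumes the optimized
    exit handling; any other status invalidates the record instead. */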
1597 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1598 { /* half likely, or at least it's a line shorter. */ }
1599 else if (rc == VINF_SUCCESS)
1600 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1601 else
1602 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1603 }
1604
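 /* Informal note: when the VM is out of memory, any less urgent EM status
    (numerically above VINF_EM_NO_MEMORY) is overridden below so the caller
    deals with the no-memory condition first. */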
1605 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1606 {
1607 if ( rc > VINF_EM_NO_MEMORY
1608 && rc <= VINF_EM_LAST)
1609 rc = VINF_EM_NO_MEMORY;
1610 }
1611
1612 return rc;
1613}
1614
1615
1616/**
1617 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1618 *
1619 * @returns VBox status code.
1620 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1621 * @param pVCpu The cross context virtual CPU structure.
1622 */
1623static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1624{
1625#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1626 /* Handle the "external interrupt" VM-exit intercept. */
1627 if ( CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
1628 && !CPUMIsGuestVmxExitCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
1629 {
1630 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1631 AssertMsg( rcStrict != VINF_PGM_CHANGE_MODE
1632 && rcStrict != VINF_VMX_VMEXIT
1633 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1634 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1635 return VBOXSTRICTRC_TODO(rcStrict);
1636 }
1637#else
1638 RT_NOREF(pVCpu);
1639#endif
1640 return VINF_NO_CHANGE;
1641}
1642
1643
1644/**
1645 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1646 *
1647 * @returns VBox status code.
1648 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1649 * @param pVCpu The cross context virtual CPU structure.
1650 */
1651static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1652{
1653#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1654 /* Handle the physical interrupt intercept (can be masked by the guest hypervisor). */
1655 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1656 {
1657 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1658 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1659 if (RT_SUCCESS(rcStrict))
1660 {
1661 AssertMsg( rcStrict != VINF_PGM_CHANGE_MODE
1662 && rcStrict != VINF_SVM_VMEXIT
1663 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1664 return VBOXSTRICTRC_VAL(rcStrict);
1665 }
1666
1667 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1668 return VINF_EM_TRIPLE_FAULT;
1669 }
1670#else
1671 NOREF(pVCpu);
1672#endif
1673 return VINF_NO_CHANGE;
1674}
1675
1676
1677/**
1678 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1679 *
1680 * @returns VBox status code.
1681 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1682 * @param pVCpu The cross context virtual CPU structure.
1683 */
1684static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1685{
1686#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1687 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1688 {
1689 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1690 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1691 if (RT_SUCCESS(rcStrict))
1692 {
1693 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1694 Assert(rcStrict != VINF_SVM_VMEXIT);
1695 return VBOXSTRICTRC_VAL(rcStrict);
1696 }
1697 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1698 return VINF_EM_TRIPLE_FAULT;
1699 }
1700#else
1701 NOREF(pVCpu);
1702#endif
1703 return VINF_NO_CHANGE;
1704}
1705
1706
1707/**
1708 * Executes all pending forced actions.
1709 *
1710 * Forced actions can cause execution delays and execution
1711 * rescheduling. The former we deal with using action priority, so
1712 * that for instance pending timers aren't scheduled and run until
1713 * right before execution. The rescheduling we deal with using
1714 * return codes. The same goes for VM termination, only in that case
1715 * we exit everything.
1716 *
1717 * @returns VBox status code of equal or greater importance/severity than rc.
1718 * The most important ones are: VINF_EM_RESCHEDULE,
1719 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1720 *
1721 * @param pVM The cross context VM structure.
1722 * @param pVCpu The cross context virtual CPU structure.
1723 * @param rc The current rc.
1724 *
1725 */
1726int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1727{
1728 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1729#ifdef VBOX_STRICT
1730 int rcIrq = VINF_SUCCESS;
1731#endif
1732 int rc2;
1733#define UPDATE_RC() \
1734 do { \
1735 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1736 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1737 break; \
1738 if (!rc || rc2 < rc) \
1739 rc = rc2; \
1740 } while (0)
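 /* Informal sketch of what UPDATE_RC() does: errors already in rc are kept,
  * a plain VINF_SUCCESS in rc2 changes nothing, and otherwise the numerically
  * smaller EM status wins, the VINF_EM_FIRST..VINF_EM_LAST codes being ordered
  * by decreasing urgency. E.g. (assuming that ordering) if rc holds
  * VINF_EM_RESCHEDULE and a forced action returns VINF_EM_SUSPEND, the suspend
  * request is what survives to the caller. */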
1741 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1742
1743 /*
1744 * Post execution chunk first.
1745 */
1746 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1747 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1748 {
1749 /*
1750 * EMT Rendezvous (must be serviced before termination).
1751 */
1752 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1753 {
1754 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1755 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1756 UPDATE_RC();
1757 /** @todo HACK ALERT! The following test is to make sure EM+TM
1758 * thinks the VM is stopped/reset before the next VM state change
1759 * is made. We need a better solution for this, or at least make it
1760 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1761 * VINF_EM_SUSPEND). */
1762 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1763 {
1764 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1765 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1766 return rc;
1767 }
1768 }
1769
1770 /*
1771 * State change request (cleared by vmR3SetStateLocked).
1772 */
1773 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1774 {
1775 VMSTATE enmState = VMR3GetState(pVM);
1776 switch (enmState)
1777 {
1778 case VMSTATE_FATAL_ERROR:
1779 case VMSTATE_FATAL_ERROR_LS:
1780 case VMSTATE_GURU_MEDITATION:
1781 case VMSTATE_GURU_MEDITATION_LS:
1782 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1783 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1784 return VINF_EM_SUSPEND;
1785
1786 case VMSTATE_DESTROYING:
1787 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1788 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1789 return VINF_EM_TERMINATE;
1790
1791 default:
1792 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1793 }
1794 }
1795
1796 /*
1797 * Debugger Facility polling.
1798 */
1799 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1800 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1801 {
1802 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1803 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1804 UPDATE_RC();
1805 }
1806
1807 /*
1808 * Postponed reset request.
1809 */
1810 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1811 {
1812 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1813 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1814 UPDATE_RC();
1815 }
1816
1817 /*
1818 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1819 */
1820 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1821 {
1822 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1823 UPDATE_RC();
1824 if (rc == VINF_EM_NO_MEMORY)
1825 return rc;
1826 }
1827
1828 /* check that we got them all */
1829 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1830 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1831 }
1832
1833 /*
1834 * Normal priority then.
1835 * (Executed in no particular order.)
1836 */
1837 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1838 {
1839 /*
1840 * PDM Queues are pending.
1841 */
1842 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1843 PDMR3QueueFlushAll(pVM);
1844
1845 /*
1846 * PDM DMA transfers are pending.
1847 */
1848 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1849 PDMR3DmaRun(pVM);
1850
1851 /*
1852 * EMT Rendezvous (make sure they are handled before the requests).
1853 */
1854 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1855 {
1856 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1857 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1858 UPDATE_RC();
1859 /** @todo HACK ALERT! The following test is to make sure EM+TM
1860 * thinks the VM is stopped/reset before the next VM state change
1861 * is made. We need a better solution for this, or at least make it
1862 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1863 * VINF_EM_SUSPEND). */
1864 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1865 {
1866 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1867 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1868 return rc;
1869 }
1870 }
1871
1872 /*
1873 * Requests from other threads.
1874 */
1875 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1876 {
1877 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1878 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1879 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1880 {
1881 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1882 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1883 return rc2;
1884 }
1885 UPDATE_RC();
1886 /** @todo HACK ALERT! The following test is to make sure EM+TM
1887 * thinks the VM is stopped/reset before the next VM state change
1888 * is made. We need a better solution for this, or at least make it
1889 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1890 * VINF_EM_SUSPEND). */
1891 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1892 {
1893 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1894 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1895 return rc;
1896 }
1897 }
1898
1899#ifdef VBOX_WITH_REM
1900 /* Replay the handler notification changes. */
1901 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1902 {
1903 /* Try not to cause deadlocks. */
1904 if ( pVM->cCpus == 1
1905 || ( !PGMIsLockOwner(pVM)
1906 && !IOMIsLockWriteOwner(pVM))
1907 )
1908 {
1909 EMRemLock(pVM);
1910 REMR3ReplayHandlerNotifications(pVM);
1911 EMRemUnlock(pVM);
1912 }
1913 }
1914#endif
1915
1916 /* check that we got them all */
1917 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1918 }
1919
1920 /*
1921 * Normal priority then. (per-VCPU)
1922 * (Executed in no particular order.)
1923 */
1924 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1925 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1926 {
1927 /*
1928 * Requests from other threads.
1929 */
1930 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1931 {
1932 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1933 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1934 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1935 {
1936 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1937 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1938 return rc2;
1939 }
1940 UPDATE_RC();
1941 /** @todo HACK ALERT! The following test is to make sure EM+TM
1942 * thinks the VM is stopped/reset before the next VM state change
1943 * is made. We need a better solution for this, or at least make it
1944 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1945 * VINF_EM_SUSPEND). */
1946 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1947 {
1948 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1949 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1950 return rc;
1951 }
1952 }
1953
1954 /* check that we got them all */
1955 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1956 }
1957
1958 /*
1959 * High priority pre execution chunk last.
1960 * (Executed in ascending priority order.)
1961 */
1962 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1963 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1964 {
1965 /*
1966 * Timers before interrupts.
1967 */
1968 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1969 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1970 TMR3TimerQueuesDo(pVM);
1971
1972 /*
1973 * Pick up asynchronously posted interrupts into the APIC.
1974 */
1975 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1976 APICUpdatePendingInterrupts(pVCpu);
1977
1978 /*
1979 * The instruction following an emulated STI should *always* be executed!
1980 *
1981 * Note! We intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if
1982 * the eip is the same as the inhibited instr address. Before we
1983 * are able to execute this instruction in raw mode (iret to
1984 * guest code) an external interrupt might force a world switch
1985 * again, possibly allowing a guest interrupt to be dispatched
1986 * in the process. This could break the guest. Sounds very
1987 * unlikely, but such timing-sensitive problems are not as rare as
1988 * you might think.
1989 */
1990 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1991 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1992 {
1993 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
1994 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1995 {
1996 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1997 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1998 }
1999 else
2000 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
2001 }
2002
2003 /** @todo SMIs. If we implement SMIs, this is where they will have to be
2004 * delivered. */
2005
2006#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2007 /*
2008 * VMX Nested-guest APIC-write pending (can cause VM-exits).
2009 * Takes priority over even SMI and INIT signals.
2010 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
2011 */
2012 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
2013 {
2014 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
2015 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
2016 UPDATE_RC();
2017 }
2018
2019 /*
2020 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
2021 * Takes priority over "Traps on the previous instruction".
2022 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
2023 */
2024 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
2025 {
2026 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
2027 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
2028 UPDATE_RC();
2029 }
2030
2031 /*
2032 * VMX Nested-guest preemption timer VM-exit.
2033 * Takes priority over NMI-window VM-exits.
2034 */
2035 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
2036 {
2037 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
2038 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
2039 UPDATE_RC();
2040 }
2041#endif
2042
2043 /*
2044 * Guest event injection.
2045 */
2046 bool fWakeupPending = false;
2047 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
2048 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
2049 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) /* Interrupt shadows block both NMIs and interrupts. */
2050 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
2051 {
2052 bool fInVmxNonRootMode;
2053 bool fInSvmHwvirtMode;
2054 bool const fInNestedGuest = CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx);
2055 if (fInNestedGuest)
2056 {
2057 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
2058 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
2059 }
2060 else
2061 {
2062 fInVmxNonRootMode = false;
2063 fInSvmHwvirtMode = false;
2064 }
2065
2066 bool fGif = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
2067 if (fGif)
2068 {
2069#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2070 /*
2071 * VMX NMI-window VM-exit.
2072 * Takes priority over non-maskable interrupts (NMIs).
2073 * Interrupt shadows block NMI-window VM-exits.
2074 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
2075 *
2076 * See Intel spec. 25.2 "Other Causes Of VM Exits".
2077 * See Intel spec. 26.7.6 "NMI-Window Exiting".
2078 */
2079 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
2080 && !CPUMIsGuestVmxVirtNmiBlocking(pVCpu, &pVCpu->cpum.GstCtx))
2081 {
2082 Assert(CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
2083 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents);
2084 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
2085 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
2086 && rc2 != VINF_PGM_CHANGE_MODE
2087 && rc2 != VINF_VMX_VMEXIT
2088 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
2089 UPDATE_RC();
2090 }
2091 else
2092#endif
2093 /*
2094 * NMIs (take priority over external interrupts).
2095 */
2096 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
2097 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2098 {
2099#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2100 if ( fInVmxNonRootMode
2101 && CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
2102 {
2103 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
2104 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
2105 UPDATE_RC();
2106 }
2107 else
2108#endif
2109#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2110 if ( fInSvmHwvirtMode
2111 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
2112 {
2113 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
2114 AssertMsg( rc2 != VINF_PGM_CHANGE_MODE
2115 && rc2 != VINF_SVM_VMEXIT
2116 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
2117 UPDATE_RC();
2118 }
2119 else
2120#endif
2121 {
2122 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
2123 if (rc2 == VINF_SUCCESS)
2124 {
2125 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
2126 fWakeupPending = true;
2127 if (pVM->em.s.fIemExecutesAll)
2128 rc2 = VINF_EM_RESCHEDULE;
2129 else
2130 {
2131 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
2132 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
2133 : VINF_EM_RESCHEDULE_REM;
2134 }
2135 }
2136 UPDATE_RC();
2137 }
2138 }
2139#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2140 /*
2141 * VMX Interrupt-window VM-exits.
2142 * Takes priority over external interrupts.
2143 */
2144 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
2145 && CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
2146 {
2147 Assert(CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
2148 Assert(pVCpu->cpum.GstCtx.hwvirt.vmx.fInterceptEvents);
2149 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
2150 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
2151 && rc2 != VINF_PGM_CHANGE_MODE
2152 && rc2 != VINF_VMX_VMEXIT
2153 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
2154 UPDATE_RC();
2155 }
2156#endif
2157#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2158 /** @todo NSTSVM: Handle this for SVM here too later not when an interrupt is
2159 * actually pending like we currently do. */
2160#endif
2161 /*
2162 * External interrupts.
2163 */
2164 else
2165 {
2166 /*
2167 * VMX: virtual interrupts take priority over physical interrupts.
2168 * SVM: physical interrupts take priority over virtual interrupts.
2169 */
2170 if ( fInVmxNonRootMode
2171 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
2172 && CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
2173 {
2174 /** @todo NSTVMX: virtual-interrupt delivery. */
2175 rc2 = VINF_SUCCESS;
2176 }
2177 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2178 && CPUMIsGuestPhysIntrEnabled(pVCpu))
2179 {
2180 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2181 if (fInVmxNonRootMode)
2182 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
2183 else if (fInSvmHwvirtMode)
2184 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
2185 else
2186 rc2 = VINF_NO_CHANGE;
2187
2188 if (rc2 == VINF_NO_CHANGE)
2189 {
2190 bool fInjected = false;
2191 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2192 /** @todo this really isn't nice, should properly handle this */
2193 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
2194 fWakeupPending = true;
2195 if ( pVM->em.s.fIemExecutesAll
2196 && ( rc2 == VINF_EM_RESCHEDULE_REM
2197 || rc2 == VINF_EM_RESCHEDULE_HM
2198 || rc2 == VINF_EM_RESCHEDULE_RAW))
2199 {
2200 rc2 = VINF_EM_RESCHEDULE;
2201 }
2202#ifdef VBOX_STRICT
2203 if (fInjected)
2204 rcIrq = rc2;
2205#endif
2206 }
2207 UPDATE_RC();
2208 }
2209 else if ( fInSvmHwvirtMode
2210 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
2211 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
2212 {
2213 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
2214 if (rc2 == VINF_NO_CHANGE)
2215 {
2216 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
2217 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
2218 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
2219 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
2220 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
2221 rc2 = VINF_EM_RESCHEDULE;
2222#ifdef VBOX_STRICT
2223 rcIrq = rc2;
2224#endif
2225 }
2226 UPDATE_RC();
2227 }
2228 }
2229 }
2230 }
2231
2232 /*
2233 * Allocate handy pages.
2234 */
2235 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2236 {
2237 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2238 UPDATE_RC();
2239 }
2240
2241 /*
2242 * Debugger Facility request.
2243 */
2244 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
2245 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
2246 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
2247 {
2248 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2249 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2250 UPDATE_RC();
2251 }
2252
2253 /*
2254 * EMT Rendezvous (must be serviced before termination).
2255 */
2256 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2257 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2258 {
2259 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2260 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2261 UPDATE_RC();
2262 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2263 * stopped/reset before the next VM state change is made. We need a better
2264 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2265 * && rc >= VINF_EM_SUSPEND). */
2266 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2267 {
2268 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2269 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2270 return rc;
2271 }
2272 }
2273
2274 /*
2275 * State change request (cleared by vmR3SetStateLocked).
2276 */
2277 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2278 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2279 {
2280 VMSTATE enmState = VMR3GetState(pVM);
2281 switch (enmState)
2282 {
2283 case VMSTATE_FATAL_ERROR:
2284 case VMSTATE_FATAL_ERROR_LS:
2285 case VMSTATE_GURU_MEDITATION:
2286 case VMSTATE_GURU_MEDITATION_LS:
2287 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2288 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2289 return VINF_EM_SUSPEND;
2290
2291 case VMSTATE_DESTROYING:
2292 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2293 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2294 return VINF_EM_TERMINATE;
2295
2296 default:
2297 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2298 }
2299 }
2300
2301 /*
2302 * Out of memory? Since most of our fellow high priority actions may cause us
2303 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2304 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2305 * than us since we can terminate without allocating more memory.
2306 */
2307 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2308 {
2309 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2310 UPDATE_RC();
2311 if (rc == VINF_EM_NO_MEMORY)
2312 return rc;
2313 }
2314
2315 /*
2316 * If the virtual sync clock is still stopped, make TM restart it.
2317 */
2318 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2319 TMR3VirtualSyncFF(pVM, pVCpu);
2320
2321#ifdef DEBUG
2322 /*
2323 * Debug, pause the VM.
2324 */
2325 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2326 {
2327 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2328 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2329 return VINF_EM_SUSPEND;
2330 }
2331#endif
2332
2333 /* check that we got them all */
2334 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2335 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2336 }
2337
2338#undef UPDATE_RC
2339 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2340 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2341 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2342 return rc;
2343}
2344
2345
2346/**
2347 * Check if the preset execution time cap restricts guest execution scheduling.
2348 *
2349 * @returns true if allowed, false otherwise
2350 * @param pVM The cross context VM structure.
2351 * @param pVCpu The cross context virtual CPU structure.
2352 */
2353bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2354{
2355 uint64_t u64UserTime, u64KernelTime;
2356
2357 if ( pVM->uCpuExecutionCap != 100
2358 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2359 {
2360 uint64_t u64TimeNow = RTTimeMilliTS();
2361 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2362 {
2363 /* New time slice. */
2364 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2365 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2366 pVCpu->em.s.u64TimeSliceExec = 0;
2367 }
2368 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2369
2370 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2371 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2372 return false;
2373 }
2374 return true;
2375}
2376
2377
2378/**
2379 * Execute VM.
2380 *
2381 * This function is the main loop of the VM. The emulation thread
2382 * calls this function when the VM has been successfully constructed
2383 * and we're ready to execute the VM.
2384 *
2385 * Returning from this function means that the VM is turned off or
2386 * suspended (state already saved) and deconstruction is next in line.
2387 *
2388 * All interaction from other threads is done using forced actions
2389 * and signalling of the wait object.
2390 *
2391 * @returns VBox status code, informational status codes may indicate failure.
2392 * @param pVM The cross context VM structure.
2393 * @param pVCpu The cross context virtual CPU structure.
2394 */
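/* Illustrative call sketch (simplified; the real caller is the emulation
   thread's outer loop, which is not part of this file):
       int rc = EMR3ExecuteVM(pVM, pVCpu);
       // rc is typically VINF_EM_SUSPEND, VINF_EM_OFF or VINF_EM_TERMINATE
       // (or an error), after which the thread services the next VM request.
 */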
2395VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2396{
2397 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2398 pVM,
2399 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2400 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2401 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2402 VM_ASSERT_EMT(pVM);
2403 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2404 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2405 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2406 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2407
2408 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2409 if (rc == 0)
2410 {
2411 /*
2412 * Start the virtual time.
2413 */
2414 TMR3NotifyResume(pVM, pVCpu);
2415
2416 /*
2417 * The Outer Main Loop.
2418 */
2419 bool fFFDone = false;
2420
2421 /* Reschedule right away to start in the right state. */
2422 rc = VINF_SUCCESS;
2423
2424 /* If resuming after a pause or a state load, restore the previous
2425 state (or else we'd start executing code); otherwise just reschedule. */
2426 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2427 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2428 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2429 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2430 else
2431 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2432 pVCpu->em.s.cIemThenRemInstructions = 0;
2433 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2434
2435 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2436 for (;;)
2437 {
2438 /*
2439 * Before we can schedule anything (we're here because
2440 * scheduling is required) we must service any pending
2441 * forced actions to avoid any pending action causing
2442 * immediate rescheduling upon entering an inner loop.
2443 *
2444 * Do forced actions.
2445 */
2446 if ( !fFFDone
2447 && RT_SUCCESS(rc)
2448 && rc != VINF_EM_TERMINATE
2449 && rc != VINF_EM_OFF
2450 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2451 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2452 {
2453 rc = emR3ForcedActions(pVM, pVCpu, rc);
2454 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2455 }
2456 else if (fFFDone)
2457 fFFDone = false;
2458
2459 /*
2460 * Now what to do?
2461 */
2462 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2463 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2464 switch (rc)
2465 {
2466 /*
2467 * Keep doing what we're currently doing.
2468 */
2469 case VINF_SUCCESS:
2470 break;
2471
2472 /*
2473 * Reschedule - to raw-mode execution.
2474 */
2475/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2476 case VINF_EM_RESCHEDULE_RAW:
2477 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2478 if (VM_IS_RAW_MODE_ENABLED(pVM))
2479 {
2480 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2481 pVCpu->em.s.enmState = EMSTATE_RAW;
2482 }
2483 else
2484 {
2485 AssertLogRelFailed();
2486 pVCpu->em.s.enmState = EMSTATE_NONE;
2487 }
2488 break;
2489
2490 /*
2491 * Reschedule - to HM or NEM.
2492 */
2493 case VINF_EM_RESCHEDULE_HM:
2494 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2495 if (VM_IS_HM_ENABLED(pVM))
2496 {
2497 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2498 pVCpu->em.s.enmState = EMSTATE_HM;
2499 }
2500 else if (VM_IS_NEM_ENABLED(pVM))
2501 {
2502 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2503 pVCpu->em.s.enmState = EMSTATE_NEM;
2504 }
2505 else
2506 {
2507 AssertLogRelFailed();
2508 pVCpu->em.s.enmState = EMSTATE_NONE;
2509 }
2510 break;
2511
2512 /*
2513 * Reschedule - to recompiled execution.
2514 */
2515 case VINF_EM_RESCHEDULE_REM:
2516 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2517 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2518 {
2519 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2520 enmOldState, EMSTATE_IEM_THEN_REM));
2521 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2522 {
2523 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2524 pVCpu->em.s.cIemThenRemInstructions = 0;
2525 }
2526 }
2527 else
2528 {
2529 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2530 pVCpu->em.s.enmState = EMSTATE_REM;
2531 }
2532 break;
2533
2534 /*
2535 * Resume.
2536 */
2537 case VINF_EM_RESUME:
2538 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2539 /* Don't reschedule in the halted or wait for SIPI case. */
2540 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2541 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2542 {
2543 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2544 break;
2545 }
2546 /* fall through and get scheduled. */
2547 RT_FALL_THRU();
2548
2549 /*
2550 * Reschedule.
2551 */
2552 case VINF_EM_RESCHEDULE:
2553 {
2554 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2555 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2556 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2557 pVCpu->em.s.cIemThenRemInstructions = 0;
2558 pVCpu->em.s.enmState = enmState;
2559 break;
2560 }
2561
2562 /*
2563 * Halted.
2564 */
2565 case VINF_EM_HALT:
2566 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2567 pVCpu->em.s.enmState = EMSTATE_HALTED;
2568 break;
2569
2570 /*
2571 * Switch to the wait for SIPI state (application processor only)
2572 */
2573 case VINF_EM_WAIT_SIPI:
2574 Assert(pVCpu->idCpu != 0);
2575 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2576 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2577 break;
2578
2579
2580 /*
2581 * Suspend.
2582 */
2583 case VINF_EM_SUSPEND:
2584 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2585 Assert(enmOldState != EMSTATE_SUSPENDED);
2586 pVCpu->em.s.enmPrevState = enmOldState;
2587 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2588 break;
2589
2590 /*
2591 * Reset.
2592 * We might end up doing a double reset for now, we'll have to clean up the mess later.
2593 */
2594 case VINF_EM_RESET:
2595 {
2596 if (pVCpu->idCpu == 0)
2597 {
2598 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2599 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2600 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2601 pVCpu->em.s.cIemThenRemInstructions = 0;
2602 pVCpu->em.s.enmState = enmState;
2603 }
2604 else
2605 {
2606 /* All other VCPUs go into the wait for SIPI state. */
2607 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2608 }
2609 break;
2610 }
2611
2612 /*
2613 * Power Off.
2614 */
2615 case VINF_EM_OFF:
2616 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2617 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2618 TMR3NotifySuspend(pVM, pVCpu);
2619 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2620 return rc;
2621
2622 /*
2623 * Terminate the VM.
2624 */
2625 case VINF_EM_TERMINATE:
2626 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2627 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2628 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2629 TMR3NotifySuspend(pVM, pVCpu);
2630 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2631 return rc;
2632
2633
2634 /*
2635 * Out of memory, suspend the VM and stuff.
2636 */
2637 case VINF_EM_NO_MEMORY:
2638 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2639 Assert(enmOldState != EMSTATE_SUSPENDED);
2640 pVCpu->em.s.enmPrevState = enmOldState;
2641 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2642 TMR3NotifySuspend(pVM, pVCpu);
2643 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2644
2645 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2646 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2647 if (rc != VINF_EM_SUSPEND)
2648 {
2649 if (RT_SUCCESS_NP(rc))
2650 {
2651 AssertLogRelMsgFailed(("%Rrc\n", rc));
2652 rc = VERR_EM_INTERNAL_ERROR;
2653 }
2654 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2655 }
2656 return rc;
2657
2658 /*
2659 * Guest debug events.
2660 */
2661 case VINF_EM_DBG_STEPPED:
2662 case VINF_EM_DBG_STOP:
2663 case VINF_EM_DBG_EVENT:
2664 case VINF_EM_DBG_BREAKPOINT:
2665 case VINF_EM_DBG_STEP:
2666 if (enmOldState == EMSTATE_RAW)
2667 {
2668 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2669 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2670 }
2671 else if (enmOldState == EMSTATE_HM)
2672 {
2673 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2674 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2675 }
2676 else if (enmOldState == EMSTATE_NEM)
2677 {
2678 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2679 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2680 }
2681 else if (enmOldState == EMSTATE_REM)
2682 {
2683 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2684 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2685 }
2686 else
2687 {
2688 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2689 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2690 }
2691 break;
2692
2693 /*
2694 * Hypervisor debug events.
2695 */
2696 case VINF_EM_DBG_HYPER_STEPPED:
2697 case VINF_EM_DBG_HYPER_BREAKPOINT:
2698 case VINF_EM_DBG_HYPER_ASSERTION:
2699 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2700 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2701 break;
2702
2703 /*
2704 * Triple fault.
2705 */
2706 case VINF_EM_TRIPLE_FAULT:
2707 if (!pVM->em.s.fGuruOnTripleFault)
2708 {
2709 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2710 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2711 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2712 continue;
2713 }
2714 /* Else fall through and trigger a guru. */
2715 RT_FALL_THRU();
2716
2717 case VERR_VMM_RING0_ASSERTION:
2718 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2719 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2720 break;
2721
2722 /*
2723 * Any error code showing up here other than the ones we
2724 * know and process above are considered to be FATAL.
2725 *
2726 * Unknown warnings and informational status codes are also
2727 * included in this.
2728 */
2729 default:
2730 if (RT_SUCCESS_NP(rc))
2731 {
2732 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2733 rc = VERR_EM_INTERNAL_ERROR;
2734 }
2735 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2736 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2737 break;
2738 }
2739
2740 /*
2741 * Act on state transition.
2742 */
2743 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2744 if (enmOldState != enmNewState)
2745 {
2746 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2747
2748 /* Clear MWait flags and the unhalt FF. */
2749 if ( enmOldState == EMSTATE_HALTED
2750 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2751 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2752 && ( enmNewState == EMSTATE_RAW
2753 || enmNewState == EMSTATE_HM
2754 || enmNewState == EMSTATE_NEM
2755 || enmNewState == EMSTATE_REM
2756 || enmNewState == EMSTATE_IEM_THEN_REM
2757 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2758 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2759 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2760 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2761 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2762 {
2763 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2764 {
2765 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2766 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2767 }
2768 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2769 {
2770 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2771 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2772 }
2773 }
2774 }
2775 else
2776 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2777
2778 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2779 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2780
2781 /*
2782 * Act on the new state.
2783 */
2784 switch (enmNewState)
2785 {
2786 /*
2787 * Execute raw.
2788 */
2789 case EMSTATE_RAW:
2790 AssertLogRelMsgFailed(("%Rrc\n", rc));
2791 rc = VERR_EM_INTERNAL_ERROR;
2792 break;
2793
2794 /*
2795 * Execute hardware accelerated raw.
2796 */
2797 case EMSTATE_HM:
2798 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2799 break;
2800
2801 /*
2802 * Execute using the native execution manager (NEM).
2803 */
2804 case EMSTATE_NEM:
2805 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2806 break;
2807
2808 /*
2809 * Execute recompiled.
2810 */
2811 case EMSTATE_REM:
2812 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2813 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2814 break;
2815
2816 /*
2817 * Execute in the interpreter.
2818 */
2819 case EMSTATE_IEM:
2820 {
2821 uint32_t cInstructions = 0;
2822#if 0 /* For testing purposes. */
2823 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2824 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2825 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2826 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2827 rc = VINF_SUCCESS;
2828 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2829#endif
2830 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2831 if (pVM->em.s.fIemExecutesAll)
2832 {
2833 Assert(rc != VINF_EM_RESCHEDULE_REM);
2834 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2835 Assert(rc != VINF_EM_RESCHEDULE_HM);
2836#ifdef VBOX_HIGH_RES_TIMERS_HACK
2837 if (cInstructions < 2048)
2838 TMTimerPollVoid(pVM, pVCpu);
2839#endif
2840 }
2841 fFFDone = false;
2842 break;
2843 }
2844
2845 /*
2846 * Execute in IEM, hoping we can quickly switch back to HM
2847 * or RAW execution. If our hopes fail, we go to REM.
2848 */
2849 case EMSTATE_IEM_THEN_REM:
2850 {
2851 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2852 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2853 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2854 break;
2855 }
2856
2857 /*
2858 * Application processor execution halted until SIPI.
2859 */
2860 case EMSTATE_WAIT_SIPI:
2861 /* no break */
2862 /*
2863 * hlt - execution halted until interrupt.
2864 */
2865 case EMSTATE_HALTED:
2866 {
2867 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2868 /* If HM (or someone else) stores a pending interrupt in
2869 TRPM, it must be dispatched ASAP without any halting.
2870 Anything pending in TRPM has been accepted and the CPU
2871 should already be in the right state to receive it. */
2872 if (TRPMHasTrap(pVCpu))
2873 rc = VINF_EM_RESCHEDULE;
2874 /* MWAIT has a special extension where it's woken up when
2875 an interrupt is pending even when IF=0. */
2876 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2877 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2878 {
2879 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2880 if (rc == VINF_SUCCESS)
2881 {
2882 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2883 APICUpdatePendingInterrupts(pVCpu);
2884
2885 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2886 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2887 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2888 {
2889 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2890 rc = VINF_EM_RESCHEDULE;
2891 }
2892 }
2893 }
2894 else
2895 {
2896 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2897 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2898 check VMCPU_FF_UPDATE_APIC here. */
2899 if ( rc == VINF_SUCCESS
2900 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2901 {
2902 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2903 rc = VINF_EM_RESCHEDULE;
2904 }
2905 }
2906
2907 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2908 break;
2909 }
2910
2911 /*
2912 * Suspended - return to VM.cpp.
2913 */
2914 case EMSTATE_SUSPENDED:
2915 TMR3NotifySuspend(pVM, pVCpu);
2916 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2917 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2918 return VINF_EM_SUSPEND;
2919
2920 /*
2921 * Debugging in the guest.
2922 */
2923 case EMSTATE_DEBUG_GUEST_RAW:
2924 case EMSTATE_DEBUG_GUEST_HM:
2925 case EMSTATE_DEBUG_GUEST_NEM:
2926 case EMSTATE_DEBUG_GUEST_IEM:
2927 case EMSTATE_DEBUG_GUEST_REM:
2928 TMR3NotifySuspend(pVM, pVCpu);
2929 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2930 TMR3NotifyResume(pVM, pVCpu);
2931 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2932 break;
2933
2934 /*
2935 * Debugging in the hypervisor.
2936 */
2937 case EMSTATE_DEBUG_HYPER:
2938 {
2939 TMR3NotifySuspend(pVM, pVCpu);
2940 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2941
2942 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2943 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2944 if (rc != VINF_SUCCESS)
2945 {
2946 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2947 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2948 else
2949 {
2950 /* switch to guru meditation mode */
2951 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2952 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2953 VMMR3FatalDump(pVM, pVCpu, rc);
2954 }
2955 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2956 return rc;
2957 }
2958
2959 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2960 TMR3NotifyResume(pVM, pVCpu);
2961 break;
2962 }
2963
2964 /*
2965 * Guru meditation takes place in the debugger.
2966 */
2967 case EMSTATE_GURU_MEDITATION:
2968 {
2969 TMR3NotifySuspend(pVM, pVCpu);
2970 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2971 VMMR3FatalDump(pVM, pVCpu, rc);
2972 emR3Debug(pVM, pVCpu, rc);
2973 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2974 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2975 return rc;
2976 }
2977
2978 /*
2979 * The states we don't expect here.
2980 */
2981 case EMSTATE_NONE:
2982 case EMSTATE_TERMINATING:
2983 default:
2984 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2985 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2986 TMR3NotifySuspend(pVM, pVCpu);
2987 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2988 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2989 return VERR_EM_INTERNAL_ERROR;
2990 }
2991 } /* The Outer Main Loop */
2992 }
2993 else
2994 {
2995 /*
2996 * Fatal error.
2997 */
2998 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2999 TMR3NotifySuspend(pVM, pVCpu);
3000 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3001 VMMR3FatalDump(pVM, pVCpu, rc);
3002 emR3Debug(pVM, pVCpu, rc);
3003 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3004 /** @todo change the VM state! */
3005 return rc;
3006 }
3007
3008 /* not reached */
3009}
3010