VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@ 87531

Last change on this file since 87531 was 87130, checked in by vboxsync, 4 years ago

VMM: More owner handling code for bugref:9837

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 118.0 KB
1/* $Id: EM.cpp 87130 2020-12-27 19:32:53Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2020 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
33
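/*
 * Illustrative sketch (editor's addition, not part of the original source): the
 * relationship described in the @page text above.  EMR3ExecuteVM() is the outer
 * loop and dispatches to the per-mode inner loops named above; the control flow
 * is heavily simplified and the local variables are hypothetical.
 *
 *     for (;;)                                            // EMR3ExecuteVM() outer loop
 *     {
 *         EMSTATE enmState = emR3Reschedule(pVM, pVCpu);  // pick the execution mode
 *         switch (enmState)
 *         {
 *             case EMSTATE_HM:  rc = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
 *             case EMSTATE_REM: rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             // ... EMSTATE_NEM, EMSTATE_RAW, halted/suspended/debug states ...
 *         }
 *         // Process forced actions and the returned status code, which may
 *         // change enmState or terminate the loop.
 *     }
 */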
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/vmm.h>
42#include <VBox/vmm/selm.h>
43#include <VBox/vmm/trpm.h>
44#include <VBox/vmm/iem.h>
45#include <VBox/vmm/nem.h>
46#include <VBox/vmm/iom.h>
47#include <VBox/vmm/dbgf.h>
48#include <VBox/vmm/pgm.h>
49#include <VBox/vmm/apic.h>
50#include <VBox/vmm/tm.h>
51#include <VBox/vmm/mm.h>
52#include <VBox/vmm/ssm.h>
53#include <VBox/vmm/pdmapi.h>
54#include <VBox/vmm/pdmcritsect.h>
55#include <VBox/vmm/pdmqueue.h>
56#include <VBox/vmm/hm.h>
57#include "EMInternal.h"
58#include <VBox/vmm/vm.h>
59#include <VBox/vmm/uvm.h>
60#include <VBox/vmm/cpumdis.h>
61#include <VBox/dis.h>
62#include <VBox/disopcode.h>
63#include <VBox/err.h>
64#include "VMMTracing.h"
65
66#include <iprt/asm.h>
67#include <iprt/string.h>
68#include <iprt/stream.h>
69#include <iprt/thread.h>
70
71
72/*********************************************************************************************************************************
73* Internal Functions *
74*********************************************************************************************************************************/
75static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
76static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
77#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
78static const char *emR3GetStateName(EMSTATE enmState);
79#endif
80static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
81#if defined(VBOX_WITH_REM) || defined(DEBUG)
82static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
83#endif
84static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
85
86
87/**
88 * Initializes the EM.
89 *
90 * @returns VBox status code.
91 * @param pVM The cross context VM structure.
92 */
93VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
94{
95 LogFlow(("EMR3Init\n"));
96 /*
97 * Assert alignment and sizes.
98 */
99 AssertCompileMemberAlignment(VM, em.s, 32);
100 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
101 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
102 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
103
104 /*
105 * Init the structure.
106 */
107 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
108 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
109
110 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
111 AssertLogRelRCReturn(rc, rc);
112
113 bool fEnabled;
114 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
115 AssertLogRelRCReturn(rc, rc);
116 pVM->em.s.fGuruOnTripleFault = !fEnabled;
117 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
118 {
119 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
120 pVM->em.s.fGuruOnTripleFault = true;
121 }
122
123 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
124
125 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
126 * Whether to try to correlate exit history in any context, detect hot spots and
127 * try to optimize these using IEM if there are other exits close by. This
128 * overrides the context specific settings. */
129 bool fExitOptimizationEnabled = true;
130 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
131 AssertLogRelRCReturn(rc, rc);
132
133 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
134 * Whether to optimize exits in ring-0. Setting this to false will also disable
135 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
136 * capabilities of the host kernel, this optimization may be unavailable. */
137 bool fExitOptimizationEnabledR0 = true;
138 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
139 AssertLogRelRCReturn(rc, rc);
140 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
141
142 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
143 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
144 * hooks are in effect). */
145 /** @todo change the default to true here */
146 bool fExitOptimizationEnabledR0PreemptDisabled = true;
147 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
148 AssertLogRelRCReturn(rc, rc);
149 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
150
151 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
152 * Maximum number of instructions to let EMHistoryExec execute in one go. */
153 uint16_t cHistoryExecMaxInstructions = 8192;
154 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
155 AssertLogRelRCReturn(rc, rc);
156 if (cHistoryExecMaxInstructions < 16)
157 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
158
159 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
160 * Maximum number of instructions between exits during probing. */
161 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
162#ifdef RT_OS_WINDOWS
163 if (VM_IS_NEM_ENABLED(pVM))
164 cHistoryProbeMaxInstructionsWithoutExit = 32;
165#endif
166 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
167 cHistoryProbeMaxInstructionsWithoutExit);
168 AssertLogRelRCReturn(rc, rc);
169 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
170 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
171 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 16");
172
173 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
174 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
175 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
176 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
177 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
178 cHistoryProbeMinInstructions);
179 AssertLogRelRCReturn(rc, rc);
180
181 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
182 {
183 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
184 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
185 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
186 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
187 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
188 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
189 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
190 }
191
192 /*
193 * Saved state.
194 */
195 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
196 NULL, NULL, NULL,
197 NULL, emR3Save, NULL,
198 NULL, emR3Load, NULL);
199 if (RT_FAILURE(rc))
200 return rc;
201
202 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
203 {
204 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
205
206 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
207 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
208 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
209 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
210
211# define EM_REG_COUNTER(a, b, c) \
212 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
213 AssertRC(rc);
214
215# define EM_REG_COUNTER_USED(a, b, c) \
216 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
217 AssertRC(rc);
218
219# define EM_REG_PROFILE(a, b, c) \
220 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
221 AssertRC(rc);
222
223# define EM_REG_PROFILE_ADV(a, b, c) \
224 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
225 AssertRC(rc);
226
227 /*
228 * Statistics.
229 */
230#ifdef VBOX_WITH_STATISTICS
231 PEMSTATS pStats;
232 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
233 if (RT_FAILURE(rc))
234 return rc;
235
236 pVCpu->em.s.pStatsR3 = pStats;
237 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
238
239# if 1 /* rawmode only? */
240 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
241 EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
242 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%u/R3/PrivInst/Cli", "Number of cli instructions.");
243 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%u/R3/PrivInst/Sti", "Number of sti instructions.");
244 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%u/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
245 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%u/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
246 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%u/R3/PrivInst/Misc", "Number of misc. instructions.");
247 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%u/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
248 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%u/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
249 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%u/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
250 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%u/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
251 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%u/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
252 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%u/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
253 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%u/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
254 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%u/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
255 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%u/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
256 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%u/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
257 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%u/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
258 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%u/R3/PrivInst/Iret", "Number of iret instructions.");
259 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%u/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
260 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%u/R3/PrivInst/Lidt", "Number of lidt instructions.");
261 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%u/R3/PrivInst/Lldt", "Number of lldt instructions.");
262 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%u/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
263 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%u/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
264 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%u/R3/PrivInst/Syscall", "Number of syscall instructions.");
265 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%u/R3/PrivInst/Sysret", "Number of sysret instructions.");
266 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%u/Cli/Total", "Total number of cli instructions executed.");
267#endif
268 pVCpu->em.s.pCliStatTree = 0;
269
270 /* these should be considered for release statistics. */
271 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
272 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
273 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
274 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
275 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
276 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
277 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
278 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
279#endif /* VBOX_WITH_STATISTICS */
280 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
281 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
282#ifdef VBOX_WITH_STATISTICS
283 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%u/EM/REMEmuSingle", "Profiling single instruction REM execution.");
284 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
285 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%u/EM/REMSync", "Profiling REM context syncing.");
286 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%u/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
287 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%u/EM/RAWExec", "Profiling Raw Mode execution.");
288 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%u/EM/RAWTail", "Profiling Raw Mode tail overhead.");
289#endif /* VBOX_WITH_STATISTICS */
290
291 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
292 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
293 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
294 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
295 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%u/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
296
297 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
298
299 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
300 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
301 AssertRC(rc);
302
303 /* History record statistics */
304 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
305 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
306 AssertRC(rc);
307
308 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
309 {
310 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
311 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
312 AssertRC(rc);
313 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
314 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
315 AssertRC(rc);
316 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
317 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", idCpu, iStep);
318 AssertRC(rc);
319 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
320 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
321 AssertRC(rc);
322 }
323
324 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
325 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
326 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
327 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
328 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
329 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
330 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
331 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
332 }
333
334 emR3InitDbg(pVM);
335 return VINF_SUCCESS;
336}
337
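/*
 * Editor's note (addition): the /EM/* keys queried above live in the CFGM tree
 * and can typically be overridden from the host via the VBoxInternal extradata
 * mechanism, e.g. (illustrative, key names taken from the code above):
 *
 *     VBoxManage setextradata "MyVM" "VBoxInternal/EM/TripleFaultReset" 1
 *     VBoxManage setextradata "MyVM" "VBoxInternal/EM/IemExecutesAll"   1
 *
 * Whether a particular key takes effect still depends on the checks above; for
 * instance TripleFaultReset is forced back off on SMP configurations.
 */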
338
339/**
340 * Called when a VM initialization stage is completed.
341 *
342 * @returns VBox status code.
343 * @param pVM The cross context VM structure.
344 * @param enmWhat The initialization state that was completed.
345 */
346VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
347{
348 if (enmWhat == VMINITCOMPLETED_RING0)
349 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
350 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
351 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
352 return VINF_SUCCESS;
353}
354
355
356/**
357 * Applies relocations to data and code managed by this
358 * component. This function will be called at init and
359 * whenever the VMM needs to relocate itself inside the GC.
360 *
361 * @param pVM The cross context VM structure.
362 */
363VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
364{
365 LogFlow(("EMR3Relocate\n"));
366 RT_NOREF(pVM);
367}
368
369
370/**
371 * Reset the EM state for a CPU.
372 *
373 * Called by EMR3Reset and hot plugging.
374 *
375 * @param pVCpu The cross context virtual CPU structure.
376 */
377VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
378{
379 /* Reset scheduling state. */
380 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
381
382 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
383 out of the HALTED state here so that enmPrevState doesn't end up as
384 HALTED when EMR3Execute returns. */
385 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
386 {
387 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
388 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
389 }
390}
391
392
393/**
394 * Reset notification.
395 *
396 * @param pVM The cross context VM structure.
397 */
398VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
399{
400 Log(("EMR3Reset: \n"));
401 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
402 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
403}
404
405
406/**
407 * Terminates the EM.
408 *
409 * Termination means cleaning up and freeing all resources;
410 * the VM itself is at this point powered off or suspended.
411 *
412 * @returns VBox status code.
413 * @param pVM The cross context VM structure.
414 */
415VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
416{
417 RT_NOREF(pVM);
418 return VINF_SUCCESS;
419}
420
421
422/**
423 * Execute state save operation.
424 *
425 * @returns VBox status code.
426 * @param pVM The cross context VM structure.
427 * @param pSSM SSM operation handle.
428 */
429static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
430{
431 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
432 {
433 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
434
435 SSMR3PutBool(pSSM, false /*fForceRAW*/);
436
437 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
438 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
439 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
440
441 /* Save mwait state. */
442 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
443 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
444 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
445 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
446 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
447 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
448 AssertRCReturn(rc, rc);
449 }
450 return VINF_SUCCESS;
451}
452
453
454/**
455 * Execute state load operation.
456 *
457 * @returns VBox status code.
458 * @param pVM The cross context VM structure.
459 * @param pSSM SSM operation handle.
460 * @param uVersion Data layout version.
461 * @param uPass The data pass.
462 */
463static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
464{
465 /*
466 * Validate version.
467 */
468 if ( uVersion > EM_SAVED_STATE_VERSION
469 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
470 {
471 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
472 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
473 }
474 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
475
476 /*
477 * Load the saved state.
478 */
479 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
480 {
481 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
482
483 bool fForceRAWIgnored;
484 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
485 AssertRCReturn(rc, rc);
486
487 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
488 {
489 SSM_GET_ENUM32_RET(pSSM, pVCpu->em.s.enmPrevState, EMSTATE);
490 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
491
492 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
493 }
494 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
495 {
496 /* Load mwait state. */
497 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
498 AssertRCReturn(rc, rc);
499 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
500 AssertRCReturn(rc, rc);
501 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
502 AssertRCReturn(rc, rc);
503 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
504 AssertRCReturn(rc, rc);
505 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
506 AssertRCReturn(rc, rc);
507 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
508 AssertRCReturn(rc, rc);
509 }
510
511 Assert(!pVCpu->em.s.pCliStatTree);
512 }
513 return VINF_SUCCESS;
514}
515
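/*
 * Editor's summary (addition): per-VCPU layout of the "em" saved-state unit as
 * written by emR3Save() above.  On load, enmPrevState is only present in units
 * newer than EM_SAVED_STATE_VERSION_PRE_SMP, and the MWAIT block only in units
 * newer than EM_SAVED_STATE_VERSION_PRE_MWAIT:
 *
 *     bool      fForceRAW                        - legacy raw-mode flag, always saved as false
 *     uint32_t  enmPrevState                     - EM state to resume with
 *     uint32_t  MWait.fWait                      - MONITOR/MWAIT arming flags
 *     RTGCPTR   MWait.uMWaitRAX, uMWaitRCX
 *     RTGCPTR   MWait.uMonitorRAX, uMonitorRCX, uMonitorRDX
 */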
516
517/**
518 * Argument packet for emR3SetExecutionPolicy.
519 */
520struct EMR3SETEXECPOLICYARGS
521{
522 EMEXECPOLICY enmPolicy;
523 bool fEnforce;
524};
525
526
527/**
528 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
529 */
530static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
531{
532 /*
533 * Only the first CPU changes the variables.
534 */
535 if (pVCpu->idCpu == 0)
536 {
537 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
538 switch (pArgs->enmPolicy)
539 {
540 case EMEXECPOLICY_RECOMPILE_RING0:
541 case EMEXECPOLICY_RECOMPILE_RING3:
542 break;
543 case EMEXECPOLICY_IEM_ALL:
544 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
545 break;
546 default:
547 AssertFailedReturn(VERR_INVALID_PARAMETER);
548 }
549 Log(("EM: Set execution policy (fIemExecutesAll=%RTbool)\n", pVM->em.s.fIemExecutesAll));
550 }
551
552 /*
553 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
554 */
555 return pVCpu->em.s.enmState == EMSTATE_RAW
556 || pVCpu->em.s.enmState == EMSTATE_HM
557 || pVCpu->em.s.enmState == EMSTATE_NEM
558 || pVCpu->em.s.enmState == EMSTATE_IEM
559 || pVCpu->em.s.enmState == EMSTATE_REM
560 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
561 ? VINF_EM_RESCHEDULE
562 : VINF_SUCCESS;
563}
564
565
566/**
567 * Changes an execution scheduling policy parameter.
568 *
569 * This is used to enable or disable raw-mode / hardware-virtualization
570 * execution of user and supervisor code.
571 *
572 * @returns VINF_SUCCESS on success.
573 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
574 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
575 *
576 * @param pUVM The user mode VM handle.
577 * @param enmPolicy The scheduling policy to change.
578 * @param fEnforce Whether to enforce the policy or not.
579 */
580VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
581{
582 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
583 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
584 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
585
586 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
587 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
588}
589
590
591/**
592 * Queries an execution scheduling policy parameter.
593 *
594 * @returns VBox status code
595 * @param pUVM The user mode VM handle.
596 * @param enmPolicy The scheduling policy to query.
597 * @param pfEnforced Where to return the current value.
598 */
599VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
600{
601 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
602 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
603 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
604 PVM pVM = pUVM->pVM;
605 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
606
607 /* No need to bother EMTs with a query. */
608 switch (enmPolicy)
609 {
610 case EMEXECPOLICY_RECOMPILE_RING0:
611 case EMEXECPOLICY_RECOMPILE_RING3:
612 *pfEnforced = false;
613 break;
614 case EMEXECPOLICY_IEM_ALL:
615 *pfEnforced = pVM->em.s.fIemExecutesAll;
616 break;
617 default:
618 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
619 }
620
621 return VINF_SUCCESS;
622}
623
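/*
 * Illustrative usage sketch (editor's addition, not part of the original file):
 * forcing all guest code through IEM with EMR3SetExecutionPolicy() and reading
 * the setting back with EMR3QueryExecutionPolicy().  Error handling is omitted
 * and the pUVM handle is assumed to be supplied by the caller.
 *
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *     // On success the running EMTs pick up the change via the
 *     // emR3SetExecutionPolicy() rendezvous and reschedule into EMSTATE_IEM.
 *
 *     bool fIemAll = false;
 *     rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fIemAll);
 *     // fIemAll now reflects the enforced setting.
 */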
624
625/**
626 * Queries the main execution engine of the VM.
627 *
628 * @returns VBox status code
629 * @param pUVM The user mode VM handle.
630 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
631 */
632VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
633{
634 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
635 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
636
637 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
638 PVM pVM = pUVM->pVM;
639 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
640
641 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
642 return VINF_SUCCESS;
643}
644
645
646/**
647 * Raise a fatal error.
648 *
649 * Safely terminate the VM with a full state report. This function
650 * will naturally never return.
651 *
652 * @param pVCpu The cross context virtual CPU structure.
653 * @param rc VBox status code.
654 */
655VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
656{
657 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
658 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
659}
660
661
662#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
663/**
664 * Gets the EM state name.
665 *
666 * @returns Pointer to a read-only state name.
667 * @param enmState The state.
668 */
669static const char *emR3GetStateName(EMSTATE enmState)
670{
671 switch (enmState)
672 {
673 case EMSTATE_NONE: return "EMSTATE_NONE";
674 case EMSTATE_RAW: return "EMSTATE_RAW";
675 case EMSTATE_HM: return "EMSTATE_HM";
676 case EMSTATE_IEM: return "EMSTATE_IEM";
677 case EMSTATE_REM: return "EMSTATE_REM";
678 case EMSTATE_HALTED: return "EMSTATE_HALTED";
679 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
680 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
681 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
682 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
683 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
684 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
685 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
686 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
687 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
688 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
689 case EMSTATE_NEM: return "EMSTATE_NEM";
690 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
691 default: return "Unknown!";
692 }
693}
694#endif /* LOG_ENABLED || VBOX_STRICT */
695
696
697/**
698 * Handle pending ring-3 I/O port write.
699 *
700 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
701 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
702 *
703 * @returns Strict VBox status code.
704 * @param pVM The cross context VM structure.
705 * @param pVCpu The cross context virtual CPU structure.
706 */
707VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
708{
709 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
710
711 /* Get and clear the pending data. */
712 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
713 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
714 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
715 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
716 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
717
718 /* Assert sanity. */
719 switch (cbValue)
720 {
721 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
722 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
723 case 4: break;
724 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
725 }
726 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
727
728 /* Do the work.*/
729 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
730 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
731 if (IOM_SUCCESS(rcStrict))
732 {
733 pVCpu->cpum.GstCtx.rip += cbInstr;
734 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
735 }
736 return rcStrict;
737}
738
739
740/**
741 * Handle pending ring-3 I/O port read.
742 *
743 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
744 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
745 *
746 * @returns Strict VBox status code.
747 * @param pVM The cross context VM structure.
748 * @param pVCpu The cross context virtual CPU structure.
749 */
750VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
751{
752 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
753
754 /* Get and clear the pending data. */
755 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
756 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
757 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
758 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
759
760 /* Assert sanity. */
761 switch (cbValue)
762 {
763 case 1: break;
764 case 2: break;
765 case 4: break;
766 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
767 }
768 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
769 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
770
771 /* Do the work.*/
772 uint32_t uValue = 0;
773 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
774 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
775 if (IOM_SUCCESS(rcStrict))
776 {
777 if (cbValue == 4)
778 pVCpu->cpum.GstCtx.rax = uValue;
779 else if (cbValue == 2)
780 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
781 else
782 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
783 pVCpu->cpum.GstCtx.rip += cbInstr;
784 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
785 }
786 return rcStrict;
787}
788
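/*
 * Editor's sketch (addition): the ring-0/raw-mode producer side that the two
 * handlers above consume.  The exact EMRZSetPendingIoPortWrite() signature is
 * not shown in this file, so the helper below is a hypothetical stand-in that
 * merely illustrates which PendingIoPortAccess fields must be filled in before
 * returning VINF_EM_PENDING_R3_IOPORT_WRITE to ring-3.
 *
 *     static VBOXSTRICTRC emRZDeferIoPortWriteExample(PVMCPU pVCpu, RTIOPORT uPort,
 *                                                     uint8_t cbInstr, uint8_t cbValue,
 *                                                     uint32_t uValue)
 *     {
 *         pVCpu->em.s.PendingIoPortAccess.uPort   = uPort;
 *         pVCpu->em.s.PendingIoPortAccess.uValue  = uValue;
 *         pVCpu->em.s.PendingIoPortAccess.cbValue = cbValue;   // consumed and cleared in ring-3
 *         pVCpu->em.s.PendingIoPortAccess.cbInstr = cbInstr;   // used to advance RIP afterwards
 *         return VINF_EM_PENDING_R3_IOPORT_WRITE;              // routed to emR3ExecutePendingIoPortWrite()
 *     }
 */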
789
790/**
791 * Debug loop.
792 *
793 * @returns VBox status code for EM.
794 * @param pVM The cross context VM structure.
795 * @param pVCpu The cross context virtual CPU structure.
796 * @param rc Current EM VBox status code.
797 */
798static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
799{
800 for (;;)
801 {
802 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
803 const VBOXSTRICTRC rcLast = rc;
804
805 /*
806 * Debug related RC.
807 */
808 switch (VBOXSTRICTRC_VAL(rc))
809 {
810 /*
811 * Single step an instruction.
812 */
813 case VINF_EM_DBG_STEP:
814 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
815 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
816 AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
817 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
818 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
819 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
820 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
821#ifdef VBOX_WITH_REM /** @todo fix me? */
822 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
823 rc = emR3RemStep(pVM, pVCpu);
824#endif
825 else
826 {
827 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
828 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
829 rc = VINF_EM_DBG_STEPPED;
830 }
831 break;
832
833 /*
834 * Simple events: stepped, breakpoint, stop/assertion.
835 */
836 case VINF_EM_DBG_STEPPED:
837 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
838 break;
839
840 case VINF_EM_DBG_BREAKPOINT:
841#ifdef VBOX_WITH_LOTS_OF_DBGF_BPS
842 rc = DBGFR3BpHit(pVM, pVCpu);
843#else
844 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
845#endif
846 break;
847
848 case VINF_EM_DBG_STOP:
849 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
850 break;
851
852 case VINF_EM_DBG_EVENT:
853 rc = DBGFR3EventHandlePending(pVM, pVCpu);
854 break;
855
856 case VINF_EM_DBG_HYPER_STEPPED:
857 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
858 break;
859
860 case VINF_EM_DBG_HYPER_BREAKPOINT:
861 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
862 break;
863
864 case VINF_EM_DBG_HYPER_ASSERTION:
865 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
866 RTLogFlush(NULL);
867 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
868 break;
869
870 /*
871 * Guru meditation.
872 */
873 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
874 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
875 break;
876 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
877 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
878 break;
879 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
880 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
881 break;
882
883 default: /** @todo don't use default for guru, but make special error codes! */
884 {
885 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
886 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
887 break;
888 }
889 }
890
891 /*
892 * Process the result.
893 */
894 switch (VBOXSTRICTRC_VAL(rc))
895 {
896 /*
897 * Continue the debugging loop.
898 */
899 case VINF_EM_DBG_STEP:
900 case VINF_EM_DBG_STOP:
901 case VINF_EM_DBG_EVENT:
902 case VINF_EM_DBG_STEPPED:
903 case VINF_EM_DBG_BREAKPOINT:
904 case VINF_EM_DBG_HYPER_STEPPED:
905 case VINF_EM_DBG_HYPER_BREAKPOINT:
906 case VINF_EM_DBG_HYPER_ASSERTION:
907 break;
908
909 /*
910 * Resuming execution (in some form) has to be done here if we got
911 * a hypervisor debug event.
912 */
913 case VINF_SUCCESS:
914 case VINF_EM_RESUME:
915 case VINF_EM_SUSPEND:
916 case VINF_EM_RESCHEDULE:
917 case VINF_EM_RESCHEDULE_RAW:
918 case VINF_EM_RESCHEDULE_REM:
919 case VINF_EM_HALT:
920 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
921 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
922 if (rc == VINF_SUCCESS)
923 rc = VINF_EM_RESCHEDULE;
924 return rc;
925
926 /*
927 * The debugger isn't attached.
928 * We'll simply turn the thing off since that's the easiest thing to do.
929 */
930 case VERR_DBGF_NOT_ATTACHED:
931 switch (VBOXSTRICTRC_VAL(rcLast))
932 {
933 case VINF_EM_DBG_HYPER_STEPPED:
934 case VINF_EM_DBG_HYPER_BREAKPOINT:
935 case VINF_EM_DBG_HYPER_ASSERTION:
936 case VERR_TRPM_PANIC:
937 case VERR_TRPM_DONT_PANIC:
938 case VERR_VMM_RING0_ASSERTION:
939 case VERR_VMM_HYPER_CR3_MISMATCH:
940 case VERR_VMM_RING3_CALL_DISABLED:
941 return rcLast;
942 }
943 return VINF_EM_OFF;
944
945 /*
946 * Status codes terminating the VM in one or another sense.
947 */
948 case VINF_EM_TERMINATE:
949 case VINF_EM_OFF:
950 case VINF_EM_RESET:
951 case VINF_EM_NO_MEMORY:
952 case VINF_EM_RAW_STALE_SELECTOR:
953 case VINF_EM_RAW_IRET_TRAP:
954 case VERR_TRPM_PANIC:
955 case VERR_TRPM_DONT_PANIC:
956 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
957 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
958 case VERR_VMM_RING0_ASSERTION:
959 case VERR_VMM_HYPER_CR3_MISMATCH:
960 case VERR_VMM_RING3_CALL_DISABLED:
961 case VERR_INTERNAL_ERROR:
962 case VERR_INTERNAL_ERROR_2:
963 case VERR_INTERNAL_ERROR_3:
964 case VERR_INTERNAL_ERROR_4:
965 case VERR_INTERNAL_ERROR_5:
966 case VERR_IPE_UNEXPECTED_STATUS:
967 case VERR_IPE_UNEXPECTED_INFO_STATUS:
968 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
969 return rc;
970
971 /*
972 * The rest is unexpected, and will keep us here.
973 */
974 default:
975 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
976 break;
977 }
978 } /* debug for ever */
979}
980
981
982#if defined(VBOX_WITH_REM) || defined(DEBUG)
983/**
984 * Steps recompiled code.
985 *
986 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
987 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
988 *
989 * @param pVM The cross context VM structure.
990 * @param pVCpu The cross context virtual CPU structure.
991 */
992static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
993{
994 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
995
996 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
997
998 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
999 return rc;
1000}
1001#endif /* VBOX_WITH_REM || DEBUG */
1002
1003
1004/**
1005 * Executes recompiled code.
1006 *
1007 * This function contains the recompiler version of the inner
1008 * execution loop (the outer loop being in EMR3ExecuteVM()).
1009 *
1010 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1011 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1012 *
1013 * @param pVM The cross context VM structure.
1014 * @param pVCpu The cross context virtual CPU structure.
1015 * @param pfFFDone Where to store an indicator telling whether or not
1016 * FFs were done before returning.
1017 *
1018 */
1019static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1020{
1021#ifdef LOG_ENABLED
1022 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1023
1024 if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1025 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1026 else
1027 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1028#endif
1029 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1030
1031#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1032 AssertMsg( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1033 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1034 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1035#endif
1036
1037 /*
1038 * Spin until we get a forced action which returns anything but VINF_SUCCESS,
1039 * or REM suggests raw-mode execution.
1040 */
1041 *pfFFDone = false;
1042 uint32_t cLoops = 0;
1043 int rc = VINF_SUCCESS;
1044 for (;;)
1045 {
1046 /*
1047 * Execute REM.
1048 */
1049 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1050 {
1051 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1052 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/));
1053 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1054 }
1055 else
1056 {
1057 /* Give up this time slice; virtual time continues */
1058 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1059 RTThreadSleep(5);
1060 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1061 rc = VINF_SUCCESS;
1062 }
1063
1064 /*
1065 * Deal with high priority post execution FFs before doing anything
1066 * else. Sync back the state and leave the lock to be on the safe side.
1067 */
1068 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1069 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1070 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1071
1072 /*
1073 * Process the returned status code.
1074 */
1075 if (rc != VINF_SUCCESS)
1076 {
1077 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1078 break;
1079 if (rc != VINF_REM_INTERRUPED_FF)
1080 {
1081 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1082 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1083 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1084 {
1085 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1086 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1087 {
1088 rc = VINF_EM_RESCHEDULE;
1089 break;
1090 }
1091 }
1092
1093 /*
1094 * Anything which is not known to us means an internal error
1095 * and the termination of the VM!
1096 */
1097 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1098 break;
1099 }
1100 }
1101
1102
1103 /*
1104 * Check and execute forced actions.
1105 *
1106 * Sync back the VM state and leave the lock before calling any of
1107 * these, you never know what's going to happen here.
1108 */
1109#ifdef VBOX_HIGH_RES_TIMERS_HACK
1110 TMTimerPollVoid(pVM, pVCpu);
1111#endif
1112 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1113 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1114 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1115 {
1116 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1117 rc = emR3ForcedActions(pVM, pVCpu, rc);
1118 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1119 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1120 if ( rc != VINF_SUCCESS
1121 && rc != VINF_EM_RESCHEDULE_REM)
1122 {
1123 *pfFFDone = true;
1124 break;
1125 }
1126 }
1127
1128 /*
1129 * Have to check if we can get back to fast execution mode every so often.
1130 */
1131 if (!(++cLoops & 7))
1132 {
1133 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
1134 if ( enmCheck != EMSTATE_REM
1135 && enmCheck != EMSTATE_IEM_THEN_REM)
1136 return VINF_EM_RESCHEDULE;
1137 }
1138
1139 } /* The Inner Loop, recompiled execution mode version. */
1140
1141 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1142 return rc;
1143}
1144
1145
1146#ifdef DEBUG
1147
1148int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1149{
1150 EMSTATE enmOldState = pVCpu->em.s.enmState;
1151
1152 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1153
1154 Log(("Single step BEGIN:\n"));
1155 for (uint32_t i = 0; i < cIterations; i++)
1156 {
1157 DBGFR3PrgStep(pVCpu);
1158 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1159 emR3RemStep(pVM, pVCpu);
1160 if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
1161 break;
1162 }
1163 Log(("Single step END:\n"));
1164 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1165 pVCpu->em.s.enmState = enmOldState;
1166 return VINF_EM_RESCHEDULE;
1167}
1168
1169#endif /* DEBUG */
1170
1171
1172/**
1173 * Try to execute the problematic code in IEM first, then fall back on REM if there
1174 * is too much of it or if IEM doesn't implement something.
1175 *
1176 * @returns Strict VBox status code from IEMExecLots.
1177 * @param pVM The cross context VM structure.
1178 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1179 * @param pfFFDone Force flags done indicator.
1180 *
1181 * @thread EMT(pVCpu)
1182 */
1183static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1184{
1185 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1186 *pfFFDone = false;
1187
1188 /*
1189 * Execute in IEM for a while.
1190 */
1191 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1192 {
1193 uint32_t cInstructions;
1194 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 1024 - pVCpu->em.s.cIemThenRemInstructions /*cMaxInstructions*/,
1195 UINT32_MAX/2 /*cPollRate*/, &cInstructions);
1196 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1197 if (rcStrict != VINF_SUCCESS)
1198 {
1199 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1200 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1201 break;
1202
1203 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1204 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1205 return rcStrict;
1206 }
1207
1208 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1209 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1210 {
1211 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1212 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1213 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1214 pVCpu->em.s.enmState = enmNewState;
1215 return VINF_SUCCESS;
1216 }
1217
1218 /*
1219 * Check for pending actions.
1220 */
1221 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1222 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1223 return VINF_SUCCESS;
1224 }
1225
1226 /*
1227 * Switch to REM.
1228 */
1229 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1230 pVCpu->em.s.enmState = EMSTATE_REM;
1231 return VINF_SUCCESS;
1232}
1233
1234
1235/**
1236 * Decides whether to execute RAW, HWACC or REM.
1237 *
1238 * @returns new EM state
1239 * @param pVM The cross context VM structure.
1240 * @param pVCpu The cross context virtual CPU structure.
1241 */
1242EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1243{
1244 /*
1245 * We stay in the wait for SIPI state unless explicitly told otherwise.
1246 */
1247 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1248 return EMSTATE_WAIT_SIPI;
1249
1250 /*
1251 * Execute everything in IEM?
1252 */
1253 if (pVM->em.s.fIemExecutesAll)
1254 return EMSTATE_IEM;
1255
1256 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1257 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1258 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1259
1260 X86EFLAGS EFlags = pVCpu->cpum.GstCtx.eflags;
1261 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1262 {
1263 if (VM_IS_HM_ENABLED(pVM))
1264 {
1265 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1266 return EMSTATE_HM;
1267 }
1268 else if (NEMR3CanExecuteGuest(pVM, pVCpu))
1269 return EMSTATE_NEM;
1270
1271 /*
1272 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1273 * turns off monitoring features essential for raw mode!
1274 */
1275 return EMSTATE_IEM_THEN_REM;
1276 }
1277
1278 /*
1279 * Standard raw-mode:
1280 *
1281 * Here we only support 16-bit and 32-bit protected mode ring-3 code with no I/O privileges,
1282 * or 32-bit protected mode ring-0 code.
1283 *
1284 * The tests are ordered by the likelihood of being true during normal execution.
1285 */
1286 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1287 {
1288 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1289 return EMSTATE_REM;
1290 }
1291
1292# ifndef VBOX_RAW_V86
1293 if (EFlags.u32 & X86_EFL_VM) {
1294 Log2(("raw mode refused: VM_MASK\n"));
1295 return EMSTATE_REM;
1296 }
1297# endif
1298
1299 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1300 uint32_t u32CR0 = pVCpu->cpum.GstCtx.cr0;
1301 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1302 {
1303 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1304 return EMSTATE_REM;
1305 }
1306
1307 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
1308 {
1309 uint32_t u32Dummy, u32Features;
1310
1311 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1312 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1313 return EMSTATE_REM;
1314 }
1315
1316 unsigned uSS = pVCpu->cpum.GstCtx.ss.Sel;
1317 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
1318 || (uSS & X86_SEL_RPL) == 3)
1319 {
1320 if (!(EFlags.u32 & X86_EFL_IF))
1321 {
1322 Log2(("raw mode refused: IF (RawR3)\n"));
1323 return EMSTATE_REM;
1324 }
1325
1326 if (!(u32CR0 & X86_CR0_WP))
1327 {
1328 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1329 return EMSTATE_REM;
1330 }
1331 }
1332 else
1333 {
1334 /* Only ring 0 supervisor code. */
1335 if ((uSS & X86_SEL_RPL) != 0)
1336 {
1337 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1338 return EMSTATE_REM;
1339 }
1340
1341 // Let's start with pure 32 bits ring 0 code first
1342 /** @todo What's pure 32-bit mode? flat? */
1343 if ( !(pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1344 || !(pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
1345 {
1346 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1347 return EMSTATE_REM;
1348 }
1349
1350 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1351 if (!(u32CR0 & X86_CR0_WP))
1352 {
1353 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1354 return EMSTATE_REM;
1355 }
1356
1357# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1358 if (!(EFlags.u32 & X86_EFL_IF))
1359 {
1360 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1361 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1362 return EMSTATE_REM;
1363 }
1364# endif
1365
1366# ifndef VBOX_WITH_RAW_RING1
1367 /** @todo still necessary??? */
1368 if (EFlags.Bits.u2IOPL != 0)
1369 {
1370 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1371 return EMSTATE_REM;
1372 }
1373# endif
1374 }
1375
1376 /*
1377 * Stale hidden selectors mean raw-mode is unsafe (being very careful).
1378 */
1379 if (pVCpu->cpum.GstCtx.cs.fFlags & CPUMSELREG_FLAGS_STALE)
1380 {
1381 Log2(("raw mode refused: stale CS\n"));
1382 return EMSTATE_REM;
1383 }
1384 if (pVCpu->cpum.GstCtx.ss.fFlags & CPUMSELREG_FLAGS_STALE)
1385 {
1386 Log2(("raw mode refused: stale SS\n"));
1387 return EMSTATE_REM;
1388 }
1389 if (pVCpu->cpum.GstCtx.ds.fFlags & CPUMSELREG_FLAGS_STALE)
1390 {
1391 Log2(("raw mode refused: stale DS\n"));
1392 return EMSTATE_REM;
1393 }
1394 if (pVCpu->cpum.GstCtx.es.fFlags & CPUMSELREG_FLAGS_STALE)
1395 {
1396 Log2(("raw mode refused: stale ES\n"));
1397 return EMSTATE_REM;
1398 }
1399 if (pVCpu->cpum.GstCtx.fs.fFlags & CPUMSELREG_FLAGS_STALE)
1400 {
1401 Log2(("raw mode refused: stale FS\n"));
1402 return EMSTATE_REM;
1403 }
1404 if (pVCpu->cpum.GstCtx.gs.fFlags & CPUMSELREG_FLAGS_STALE)
1405 {
1406 Log2(("raw mode refused: stale GS\n"));
1407 return EMSTATE_REM;
1408 }
1409
1410# ifdef VBOX_WITH_SAFE_STR
1411 if (pVCpu->cpum.GstCtx.tr.Sel == 0)
1412 {
1413 Log(("Raw mode refused -> TR=0\n"));
1414 return EMSTATE_REM;
1415 }
1416# endif
1417
1418 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1419 return EMSTATE_RAW;
1420}
1421
1422
1423/**
1424 * Executes all high priority post execution force actions.
1425 *
1426 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1427 * fatal error status code.
1428 *
1429 * @param pVM The cross context VM structure.
1430 * @param pVCpu The cross context virtual CPU structure.
1431 * @param rc The current strict VBox status code rc.
1432 */
1433VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1434{
1435 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1436
1437 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1438 PDMCritSectBothFF(pVCpu);
1439
1440 /* Update CR3 (Nested Paging case for HM). */
1441 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1442 {
1443 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1444 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1445 if (RT_FAILURE(rc2))
1446 return rc2;
1447 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1448 }
1449
1450 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1451 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1452 {
1453 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1454 if (CPUMIsGuestInPAEMode(pVCpu))
1455 {
1456 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1457 AssertPtr(pPdpes);
1458
1459 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1460 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1461 }
1462 else
1463 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1464 }
1465
1466 /* IEM has pending work (typically memory write after INS instruction). */
1467 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1468 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1469
1470 /* IOM has pending work (committing an I/O or MMIO write). */
1471 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1472 {
1473 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1474 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1475 { /* half likely, or at least it's a line shorter. */ }
1476 else if (rc == VINF_SUCCESS)
1477 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1478 else
1479 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1480 }
1481
1482 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1483 {
1484 if ( rc > VINF_EM_NO_MEMORY
1485 && rc <= VINF_EM_LAST)
1486 rc = VINF_EM_NO_MEMORY;
1487 }
1488
1489 return rc;
1490}
1491
1492
1493/**
1494 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1495 *
1496 * @returns VBox status code.
1497 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1498 * @param pVCpu The cross context virtual CPU structure.
1499 */
1500static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1501{
1502#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1503 /* Handle the "external interrupt" VM-exit intercept. */
1504 if (CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT))
1505 {
1506 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1507 AssertMsg( rcStrict != VINF_PGM_CHANGE_MODE
1508 && rcStrict != VINF_VMX_VMEXIT
1509 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1510 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1511 return VBOXSTRICTRC_TODO(rcStrict);
1512 }
1513#else
1514 RT_NOREF(pVCpu);
1515#endif
1516 return VINF_NO_CHANGE;
1517}
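
/* Note on the VINF_NO_CHANGE contract: this helper and its SVM siblings below report
   VINF_NO_CHANGE when the respective intercept did not fire; the external interrupt
   code further down in emR3ForcedActions() then falls back to injecting the interrupt
   into the (nested-)guest itself via TRPMR3InjectEvent(). */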
1518
1519
1520/**
1521 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1522 *
1523 * @returns VBox status code.
1524 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1525 * @param pVCpu The cross context virtual CPU structure.
1526 */
1527static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1528{
1529#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1530 /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
1531 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1532 {
1533 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1534 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1535 if (RT_SUCCESS(rcStrict))
1536 {
1537 AssertMsg( rcStrict != VINF_PGM_CHANGE_MODE
1538 && rcStrict != VINF_SVM_VMEXIT
1539 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1540 return VBOXSTRICTRC_VAL(rcStrict);
1541 }
1542
1543 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1544 return VINF_EM_TRIPLE_FAULT;
1545 }
1546#else
1547 NOREF(pVCpu);
1548#endif
1549 return VINF_NO_CHANGE;
1550}
1551
1552
1553/**
1554 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1555 *
1556 * @returns VBox status code.
1557 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1558 * @param pVCpu The cross context virtual CPU structure.
1559 */
1560static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1561{
1562#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1563 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1564 {
1565 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1566 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1567 if (RT_SUCCESS(rcStrict))
1568 {
1569 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1570 Assert(rcStrict != VINF_SVM_VMEXIT);
1571 return VBOXSTRICTRC_VAL(rcStrict);
1572 }
1573 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1574 return VINF_EM_TRIPLE_FAULT;
1575 }
1576#else
1577 NOREF(pVCpu);
1578#endif
1579 return VINF_NO_CHANGE;
1580}
1581
1582
1583/**
1584 * Executes all pending forced actions.
1585 *
1586 * Forced actions can cause execution delays and execution
1587 * rescheduling. The first we deal with using action priority, so
1588 * that for instance pending timers aren't scheduled and run until
1589 * right before execution. The rescheduling we deal with using
1590 * return codes. The same goes for VM termination, only in that case
1591 * we exit everything.
1592 *
1593 * @returns VBox status code of equal or greater importance/severity than rc.
1594 * The most important ones are: VINF_EM_RESCHEDULE,
1595 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1596 *
1597 * @param pVM The cross context VM structure.
1598 * @param pVCpu The cross context virtual CPU structure.
1599 * @param rc The current rc.
1600 *
1601 */
1602int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1603{
1604 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1605#ifdef VBOX_STRICT
1606 int rcIrq = VINF_SUCCESS;
1607#endif
1608 int rc2;
1609#define UPDATE_RC() \
1610 do { \
1611 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1612 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1613 break; \
1614 if (!rc || rc2 < rc) \
1615 rc = rc2; \
1616 } while (0)
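    /* A short illustration of the UPDATE_RC() merge rule above (assuming the usual
       VINF_EM_* ordering where more important statuses have lower values): a plain
       VINF_SUCCESS in rc2 never changes anything, and once rc holds a failure status
       it is kept; apart from that the numerically smaller status wins, so a failure
       or a more important code such as VINF_EM_SUSPEND from one of the handlers below
       overrides a previously recorded, less important one like VINF_EM_RESCHEDULE. */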
1617 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1618
1619 /*
1620 * Post execution chunk first.
1621 */
1622 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1623 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1624 {
1625 /*
1626 * EMT Rendezvous (must be serviced before termination).
1627 */
1628 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1629 {
1630 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1631 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1632 UPDATE_RC();
1633 /** @todo HACK ALERT! The following test is to make sure EM+TM
1634 * thinks the VM is stopped/reset before the next VM state change
1635 * is made. We need a better solution for this, or at least make it
1636 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1637 * VINF_EM_SUSPEND). */
1638 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1639 {
1640 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1641 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1642 return rc;
1643 }
1644 }
1645
1646 /*
1647 * State change request (cleared by vmR3SetStateLocked).
1648 */
1649 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1650 {
1651 VMSTATE enmState = VMR3GetState(pVM);
1652 switch (enmState)
1653 {
1654 case VMSTATE_FATAL_ERROR:
1655 case VMSTATE_FATAL_ERROR_LS:
1656 case VMSTATE_GURU_MEDITATION:
1657 case VMSTATE_GURU_MEDITATION_LS:
1658 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1659 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1660 return VINF_EM_SUSPEND;
1661
1662 case VMSTATE_DESTROYING:
1663 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1664 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1665 return VINF_EM_TERMINATE;
1666
1667 default:
1668 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1669 }
1670 }
1671
1672 /*
1673 * Debugger Facility polling.
1674 */
1675 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1676 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1677 {
1678 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1679 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1680 UPDATE_RC();
1681 }
1682
1683 /*
1684 * Postponed reset request.
1685 */
1686 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1687 {
1688 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1689 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1690 UPDATE_RC();
1691 }
1692
1693 /*
1694 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1695 */
1696 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1697 {
1698 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1699 UPDATE_RC();
1700 if (rc == VINF_EM_NO_MEMORY)
1701 return rc;
1702 }
1703
1704 /* check that we got them all */
1705 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1706 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1707 }
1708
1709 /*
1710 * Normal priority then.
1711 * (Executed in no particular order.)
1712 */
1713 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1714 {
1715 /*
1716 * PDM Queues are pending.
1717 */
1718 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1719 PDMR3QueueFlushAll(pVM);
1720
1721 /*
1722 * PDM DMA transfers are pending.
1723 */
1724 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1725 PDMR3DmaRun(pVM);
1726
1727 /*
1728 * EMT Rendezvous (make sure they are handled before the requests).
1729 */
1730 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1731 {
1732 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1733 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1734 UPDATE_RC();
1735 /** @todo HACK ALERT! The following test is to make sure EM+TM
1736 * thinks the VM is stopped/reset before the next VM state change
1737 * is made. We need a better solution for this, or at least make it
1738 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1739 * VINF_EM_SUSPEND). */
1740 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1741 {
1742 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1743 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1744 return rc;
1745 }
1746 }
1747
1748 /*
1749 * Requests from other threads.
1750 */
1751 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1752 {
1753 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1754 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1755 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1756 {
1757 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1758 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1759 return rc2;
1760 }
1761 UPDATE_RC();
1762 /** @todo HACK ALERT! The following test is to make sure EM+TM
1763 * thinks the VM is stopped/reset before the next VM state change
1764 * is made. We need a better solution for this, or at least make it
1765 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1766 * VINF_EM_SUSPEND). */
1767 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1768 {
1769 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1770 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1771 return rc;
1772 }
1773 }
1774
1775 /* check that we got them all */
1776 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_EMT_RENDEZVOUS));
1777 }
1778
1779 /*
1780 * Normal priority then. (per-VCPU)
1781 * (Executed in no particular order.)
1782 */
1783 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1784 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1785 {
1786 /*
1787 * Requests from other threads.
1788 */
1789 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1790 {
1791 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1792 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1793 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1794 {
1795 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1796 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1797 return rc2;
1798 }
1799 UPDATE_RC();
1800 /** @todo HACK ALERT! The following test is to make sure EM+TM
1801 * thinks the VM is stopped/reset before the next VM state change
1802 * is made. We need a better solution for this, or at least make it
1803 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1804 * VINF_EM_SUSPEND). */
1805 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1806 {
1807 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1808 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1809 return rc;
1810 }
1811 }
1812
1813 /* check that we got them all */
1814 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1815 }
1816
1817 /*
1818 * High priority pre execution chunk last.
1819 * (Executed in ascending priority order.)
1820 */
1821 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1822 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1823 {
1824 /*
1825 * Timers before interrupts.
1826 */
1827 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1828 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1829 TMR3TimerQueuesDo(pVM);
1830
1831 /*
1832 * Pick up asynchronously posted interrupts into the APIC.
1833 */
1834 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1835 APICUpdatePendingInterrupts(pVCpu);
1836
1837 /*
1838 * The instruction following an emulated STI should *always* be executed!
1839 *
1840 * Note! We intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if
1841 * the eip is the same as the inhibited instr address. Before we
1842 * are able to execute this instruction in raw mode (iret to
1843 * guest code) an external interrupt might force a world switch
1844 * again. Possibly allowing a guest interrupt to be dispatched
1845 * in the process. This could break the guest. Sounds very
1846 * unlikely, but such timing sensitive problems are not as rare as
1847 * you might think.
1848 */
1849 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1850 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1851 {
1852 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
1853 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1854 {
1855 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1856 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1857 }
1858 else
1859 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1860 }
1861
1862 /** @todo SMIs. If we implement SMIs, this is where they will have to be
1863 * delivered. */
1864
1865#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1866 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER))
1867 {
1868 /*
1869 * VMX Nested-guest APIC-write pending (can cause VM-exits).
1870 * Takes priority even over SMI and INIT signals.
1871 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
1872 */
1873 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
1874 {
1875 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
1876 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1877 UPDATE_RC();
1878 }
1879
1880 /*
1881 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
1882 * Takes priority over "Traps on the previous instruction".
1883 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
1884 */
1885 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
1886 {
1887 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
1888 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1889 UPDATE_RC();
1890 }
1891
1892 /*
1893 * VMX Nested-guest preemption timer VM-exit.
1894 * Takes priority over NMI-window VM-exits.
1895 */
1896 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
1897 {
1898 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
1899 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1900 UPDATE_RC();
1901 }
1902 }
1903#endif
1904
1905 /*
1906 * Guest event injection.
1907 */
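    /*
     * In rough priority order the injection code below considers: an NMI-window
     * VM-exit (VMX nested guests), NMIs themselves, an interrupt-window VM-exit
     * (VMX nested guests), and finally external interrupts, where virtual
     * interrupts beat physical ones for VMX nested guests and the other way
     * around for SVM.  All of it is gated on GIF being set and, among other
     * checks, on there being no interrupt shadow or event already queued in TRPM.
     */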
1908 bool fWakeupPending = false;
1909 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1910 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1911 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) /* Interrupt shadows block both NMIs and interrupts. */
1912 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
1913 {
1914 bool fInVmxNonRootMode;
1915 bool fInSvmHwvirtMode;
1916 bool const fInNestedGuest = CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx);
1917 if (fInNestedGuest)
1918 {
1919 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
1920 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
1921 }
1922 else
1923 {
1924 fInVmxNonRootMode = false;
1925 fInSvmHwvirtMode = false;
1926 }
1927
1928 bool fGif = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
1929 if (fGif)
1930 {
1931#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1932 /*
1933 * VMX NMI-window VM-exit.
1934 * Takes priority over non-maskable interrupts (NMIs).
1935 * Interrupt shadows block NMI-window VM-exits.
1936 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
1937 *
1938 * See Intel spec. 25.2 "Other Causes Of VM Exits".
1939 * See Intel spec. 26.7.6 "NMI-Window Exiting".
1940 */
1941 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
1942 && !CPUMIsGuestVmxVirtNmiBlocking(&pVCpu->cpum.GstCtx))
1943 {
1944 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
1945 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
1946 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
1947 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
1948 && rc2 != VINF_PGM_CHANGE_MODE
1949 && rc2 != VINF_VMX_VMEXIT
1950 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1951 UPDATE_RC();
1952 }
1953 else
1954#endif
1955 /*
1956 * NMIs (take priority over external interrupts).
1957 */
1958 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
1959 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
1960 {
1961#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1962 if ( fInVmxNonRootMode
1963 && CPUMIsGuestVmxPinCtlsSet(&pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
1964 {
1965 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
1966 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
1967 UPDATE_RC();
1968 }
1969 else
1970#endif
1971#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1972 if ( fInSvmHwvirtMode
1973 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
1974 {
1975 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
1976 AssertMsg( rc2 != VINF_PGM_CHANGE_MODE
1977 && rc2 != VINF_SVM_VMEXIT
1978 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
1979 UPDATE_RC();
1980 }
1981 else
1982#endif
1983 {
1984 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
1985 if (rc2 == VINF_SUCCESS)
1986 {
1987 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
1988 fWakeupPending = true;
1989 if (pVM->em.s.fIemExecutesAll)
1990 rc2 = VINF_EM_RESCHEDULE;
1991 else
1992 {
1993 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
1994 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
1995 : VINF_EM_RESCHEDULE_REM;
1996 }
1997 }
1998 UPDATE_RC();
1999 }
2000 }
2001#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2002 /*
2003 * VMX Interrupt-window VM-exits.
2004 * Takes priority over external interrupts.
2005 */
2006 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
2007 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
2008 {
2009 Assert(CPUMIsGuestVmxProcCtlsSet(&pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
2010 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
2011 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
2012 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
2013 && rc2 != VINF_PGM_CHANGE_MODE
2014 && rc2 != VINF_VMX_VMEXIT
2015 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
2016 UPDATE_RC();
2017 }
2018#endif
2019#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2020 /** @todo NSTSVM: Handle this for SVM here too later, not when an interrupt is
2021 * actually pending like we currently do. */
2022#endif
2023 /*
2024 * External interrupts.
2025 */
2026 else
2027 {
2028 /*
2029 * VMX: virtual interrupts take priority over physical interrupts.
2030 * SVM: physical interrupts take priority over virtual interrupts.
2031 */
2032 if ( fInVmxNonRootMode
2033 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
2034 && CPUMIsGuestVmxVirtIntrEnabled(&pVCpu->cpum.GstCtx))
2035 {
2036 /** @todo NSTVMX: virtual-interrupt delivery. */
2037 rc2 = VINF_SUCCESS;
2038 }
2039 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2040 && CPUMIsGuestPhysIntrEnabled(pVCpu))
2041 {
2042 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2043 if (fInVmxNonRootMode)
2044 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
2045 else if (fInSvmHwvirtMode)
2046 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
2047 else
2048 rc2 = VINF_NO_CHANGE;
2049
2050 if (rc2 == VINF_NO_CHANGE)
2051 {
2052 bool fInjected = false;
2053 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2054 /** @todo this really isn't nice, should properly handle this */
2055 /* Note! This can still cause a VM-exit (on Intel). */
2056 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
2057 fWakeupPending = true;
2058 if ( pVM->em.s.fIemExecutesAll
2059 && ( rc2 == VINF_EM_RESCHEDULE_REM
2060 || rc2 == VINF_EM_RESCHEDULE_HM
2061 || rc2 == VINF_EM_RESCHEDULE_RAW))
2062 {
2063 rc2 = VINF_EM_RESCHEDULE;
2064 }
2065#ifdef VBOX_STRICT
2066 if (fInjected)
2067 rcIrq = rc2;
2068#endif
2069 }
2070 UPDATE_RC();
2071 }
2072 else if ( fInSvmHwvirtMode
2073 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
2074 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
2075 {
2076 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
2077 if (rc2 == VINF_NO_CHANGE)
2078 {
2079 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
2080 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
2081 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
2082 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
2083 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
2084 rc2 = VINF_EM_RESCHEDULE;
2085#ifdef VBOX_STRICT
2086 rcIrq = rc2;
2087#endif
2088 }
2089 UPDATE_RC();
2090 }
2091 }
2092 }
2093 }
2094
2095 /*
2096 * Allocate handy pages.
2097 */
2098 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2099 {
2100 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2101 UPDATE_RC();
2102 }
2103
2104 /*
2105 * Debugger Facility request.
2106 */
2107 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
2108 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
2109 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
2110 {
2111 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2112 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2113 UPDATE_RC();
2114 }
2115
2116 /*
2117 * EMT Rendezvous (must be serviced before termination).
2118 */
2119 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2120 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2121 {
2122 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2123 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2124 UPDATE_RC();
2125 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2126 * stopped/reset before the next VM state change is made. We need a better
2127 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2128 * && rc <= VINF_EM_SUSPEND). */
2129 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2130 {
2131 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2132 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2133 return rc;
2134 }
2135 }
2136
2137 /*
2138 * State change request (cleared by vmR3SetStateLocked).
2139 */
2140 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2141 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2142 {
2143 VMSTATE enmState = VMR3GetState(pVM);
2144 switch (enmState)
2145 {
2146 case VMSTATE_FATAL_ERROR:
2147 case VMSTATE_FATAL_ERROR_LS:
2148 case VMSTATE_GURU_MEDITATION:
2149 case VMSTATE_GURU_MEDITATION_LS:
2150 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2151 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2152 return VINF_EM_SUSPEND;
2153
2154 case VMSTATE_DESTROYING:
2155 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2156 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2157 return VINF_EM_TERMINATE;
2158
2159 default:
2160 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2161 }
2162 }
2163
2164 /*
2165 * Out of memory? Since most of our fellow high priority actions may cause us
2166 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2167 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2168 * than us since we can terminate without allocating more memory.
2169 */
2170 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2171 {
2172 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2173 UPDATE_RC();
2174 if (rc == VINF_EM_NO_MEMORY)
2175 return rc;
2176 }
2177
2178 /*
2179 * If the virtual sync clock is still stopped, make TM restart it.
2180 */
2181 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2182 TMR3VirtualSyncFF(pVM, pVCpu);
2183
2184#ifdef DEBUG
2185 /*
2186 * Debug, pause the VM.
2187 */
2188 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2189 {
2190 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2191 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2192 return VINF_EM_SUSPEND;
2193 }
2194#endif
2195
2196 /* check that we got them all */
2197 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2198 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2199 }
2200
2201#undef UPDATE_RC
2202 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2203 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2204 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2205 return rc;
2206}
2207
2208
2209/**
2210 * Check whether guest execution is currently allowed under the preset CPU execution time cap.
2211 *
2212 * @returns true if allowed, false otherwise
2213 * @param pVM The cross context VM structure.
2214 * @param pVCpu The cross context virtual CPU structure.
2215 */
2216bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2217{
2218 uint64_t u64UserTime, u64KernelTime;
2219
2220 if ( pVM->uCpuExecutionCap != 100
2221 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2222 {
2223 uint64_t u64TimeNow = RTTimeMilliTS();
2224 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2225 {
2226 /* New time slice. */
2227 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2228 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2229 pVCpu->em.s.u64TimeSliceExec = 0;
2230 }
2231 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2232
2233 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2234 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2235 return false;
2236 }
2237 return true;
2238}
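
/* A rough worked example (assuming EM_TIME_SLICE is the usual 100 ms slice length from
   EMInternal.h): with uCpuExecutionCap set to 50, this EMT gets a budget of
   100 * 50 / 100 = 50 ms of combined kernel+user time per slice; once
   u64TimeSliceExec reaches that budget the function returns false until the next
   time slice starts and the budget is replenished. */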
2239
2240
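/* A minimal sketch of how this entry point is reached (the actual code lives in the
   emulation thread in VMEmt.cpp and may differ in detail): once the VM is running,
   the per-VCPU emulation thread essentially just does

       int rc = EMR3ExecuteVM(pVM, pVCpu);

   and only regains control when the VM is powered off, terminated or suspended, as
   described in the function documentation below. */
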
2241/**
2242 * Execute VM.
2243 *
2244 * This function is the main loop of the VM. The emulation thread
2245 * calls this function when the VM has been successfully constructed
2246 * and we're ready to execute the VM.
2247 *
2248 * Returning from this function means that the VM is turned off or
2249 * suspended (state already saved) and deconstruction is next in line.
2250 *
2251 * All interaction from other threads is done using forced actions
2252 * and signalling of the wait object.
2253 *
2254 * @returns VBox status code; informational status codes may indicate failure.
2255 * @param pVM The cross context VM structure.
2256 * @param pVCpu The cross context virtual CPU structure.
2257 */
2258VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2259{
2260 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2261 pVM,
2262 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2263 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2264 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2265 VM_ASSERT_EMT(pVM);
2266 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2267 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2268 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2269 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2270
2271 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2272 if (rc == 0)
2273 {
2274 /*
2275 * Start the virtual time.
2276 */
2277 TMR3NotifyResume(pVM, pVCpu);
2278
2279 /*
2280 * The Outer Main Loop.
2281 */
2282 bool fFFDone = false;
2283
2284 /* Reschedule right away to start in the right state. */
2285 rc = VINF_SUCCESS;
2286
2287 /* If resuming after a pause or a state load, restore the previous
2288 state or else we'll start executing code. Else, just reschedule. */
2289 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2290 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2291 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2292 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2293 else
2294 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2295 pVCpu->em.s.cIemThenRemInstructions = 0;
2296 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2297
2298 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2299 for (;;)
2300 {
2301 /*
2302 * Before we can schedule anything (we're here because
2303 * scheduling is required) we must service any pending
2304 * forced actions to avoid any pending action causing
2305 * immediate rescheduling upon entering an inner loop
2306 *
2307 * Do forced actions.
2308 */
2309 if ( !fFFDone
2310 && RT_SUCCESS(rc)
2311 && rc != VINF_EM_TERMINATE
2312 && rc != VINF_EM_OFF
2313 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2314 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2315 {
2316 rc = emR3ForcedActions(pVM, pVCpu, rc);
2317 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2318 }
2319 else if (fFFDone)
2320 fFFDone = false;
2321
2322 /*
2323 * Now what to do?
2324 */
2325 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2326 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2327 switch (rc)
2328 {
2329 /*
2330 * Keep doing what we're currently doing.
2331 */
2332 case VINF_SUCCESS:
2333 break;
2334
2335 /*
2336 * Reschedule - to raw-mode execution.
2337 */
2338/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2339 case VINF_EM_RESCHEDULE_RAW:
2340 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2341 if (VM_IS_RAW_MODE_ENABLED(pVM))
2342 {
2343 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2344 pVCpu->em.s.enmState = EMSTATE_RAW;
2345 }
2346 else
2347 {
2348 AssertLogRelFailed();
2349 pVCpu->em.s.enmState = EMSTATE_NONE;
2350 }
2351 break;
2352
2353 /*
2354 * Reschedule - to HM or NEM.
2355 */
2356 case VINF_EM_RESCHEDULE_HM:
2357 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2358 if (VM_IS_HM_ENABLED(pVM))
2359 {
2360 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2361 pVCpu->em.s.enmState = EMSTATE_HM;
2362 }
2363 else if (VM_IS_NEM_ENABLED(pVM))
2364 {
2365 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2366 pVCpu->em.s.enmState = EMSTATE_NEM;
2367 }
2368 else
2369 {
2370 AssertLogRelFailed();
2371 pVCpu->em.s.enmState = EMSTATE_NONE;
2372 }
2373 break;
2374
2375 /*
2376 * Reschedule - to recompiled execution.
2377 */
2378 case VINF_EM_RESCHEDULE_REM:
2379 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2380 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2381 {
2382 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2383 enmOldState, EMSTATE_IEM_THEN_REM));
2384 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2385 {
2386 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2387 pVCpu->em.s.cIemThenRemInstructions = 0;
2388 }
2389 }
2390 else
2391 {
2392 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2393 pVCpu->em.s.enmState = EMSTATE_REM;
2394 }
2395 break;
2396
2397 /*
2398 * Resume.
2399 */
2400 case VINF_EM_RESUME:
2401 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2402 /* Don't reschedule in the halted or wait for SIPI case. */
2403 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2404 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2405 {
2406 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2407 break;
2408 }
2409 /* fall through and get scheduled. */
2410 RT_FALL_THRU();
2411
2412 /*
2413 * Reschedule.
2414 */
2415 case VINF_EM_RESCHEDULE:
2416 {
2417 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2418 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2419 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2420 pVCpu->em.s.cIemThenRemInstructions = 0;
2421 pVCpu->em.s.enmState = enmState;
2422 break;
2423 }
2424
2425 /*
2426 * Halted.
2427 */
2428 case VINF_EM_HALT:
2429 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2430 pVCpu->em.s.enmState = EMSTATE_HALTED;
2431 break;
2432
2433 /*
2434 * Switch to the wait for SIPI state (application processor only)
2435 */
2436 case VINF_EM_WAIT_SIPI:
2437 Assert(pVCpu->idCpu != 0);
2438 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2439 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2440 break;
2441
2442
2443 /*
2444 * Suspend.
2445 */
2446 case VINF_EM_SUSPEND:
2447 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2448 Assert(enmOldState != EMSTATE_SUSPENDED);
2449 pVCpu->em.s.enmPrevState = enmOldState;
2450 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2451 break;
2452
2453 /*
2454 * Reset.
2455 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2456 */
2457 case VINF_EM_RESET:
2458 {
2459 if (pVCpu->idCpu == 0)
2460 {
2461 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2462 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2463 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2464 pVCpu->em.s.cIemThenRemInstructions = 0;
2465 pVCpu->em.s.enmState = enmState;
2466 }
2467 else
2468 {
2469 /* All other VCPUs go into the wait for SIPI state. */
2470 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2471 }
2472 break;
2473 }
2474
2475 /*
2476 * Power Off.
2477 */
2478 case VINF_EM_OFF:
2479 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2480 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2481 TMR3NotifySuspend(pVM, pVCpu);
2482 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2483 return rc;
2484
2485 /*
2486 * Terminate the VM.
2487 */
2488 case VINF_EM_TERMINATE:
2489 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2490 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2491 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2492 TMR3NotifySuspend(pVM, pVCpu);
2493 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2494 return rc;
2495
2496
2497 /*
2498 * Out of memory, suspend the VM and stuff.
2499 */
2500 case VINF_EM_NO_MEMORY:
2501 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2502 Assert(enmOldState != EMSTATE_SUSPENDED);
2503 pVCpu->em.s.enmPrevState = enmOldState;
2504 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2505 TMR3NotifySuspend(pVM, pVCpu);
2506 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2507
2508 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2509 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2510 if (rc != VINF_EM_SUSPEND)
2511 {
2512 if (RT_SUCCESS_NP(rc))
2513 {
2514 AssertLogRelMsgFailed(("%Rrc\n", rc));
2515 rc = VERR_EM_INTERNAL_ERROR;
2516 }
2517 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2518 }
2519 return rc;
2520
2521 /*
2522 * Guest debug events.
2523 */
2524 case VINF_EM_DBG_STEPPED:
2525 case VINF_EM_DBG_STOP:
2526 case VINF_EM_DBG_EVENT:
2527 case VINF_EM_DBG_BREAKPOINT:
2528 case VINF_EM_DBG_STEP:
2529 if (enmOldState == EMSTATE_RAW)
2530 {
2531 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2532 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2533 }
2534 else if (enmOldState == EMSTATE_HM)
2535 {
2536 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2537 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2538 }
2539 else if (enmOldState == EMSTATE_NEM)
2540 {
2541 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2542 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2543 }
2544 else if (enmOldState == EMSTATE_REM)
2545 {
2546 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2547 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2548 }
2549 else
2550 {
2551 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2552 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2553 }
2554 break;
2555
2556 /*
2557 * Hypervisor debug events.
2558 */
2559 case VINF_EM_DBG_HYPER_STEPPED:
2560 case VINF_EM_DBG_HYPER_BREAKPOINT:
2561 case VINF_EM_DBG_HYPER_ASSERTION:
2562 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2563 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2564 break;
2565
2566 /*
2567 * Triple fault.
2568 */
2569 case VINF_EM_TRIPLE_FAULT:
2570 if (!pVM->em.s.fGuruOnTripleFault)
2571 {
2572 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2573 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2574 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2575 continue;
2576 }
2577 /* Else fall through and trigger a guru. */
2578 RT_FALL_THRU();
2579
2580 case VERR_VMM_RING0_ASSERTION:
2581 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2582 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2583 break;
2584
2585 /*
2586 * Any error code showing up here other than the ones we
2587 * know and process above is considered to be FATAL.
2588 *
2589 * Unknown warnings and informational status codes are also
2590 * included in this.
2591 */
2592 default:
2593 if (RT_SUCCESS_NP(rc))
2594 {
2595 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2596 rc = VERR_EM_INTERNAL_ERROR;
2597 }
2598 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2599 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2600 break;
2601 }
2602
2603 /*
2604 * Act on state transition.
2605 */
2606 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2607 if (enmOldState != enmNewState)
2608 {
2609 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2610
2611 /* Clear MWait flags and the unhalt FF. */
2612 if ( enmOldState == EMSTATE_HALTED
2613 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2614 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2615 && ( enmNewState == EMSTATE_RAW
2616 || enmNewState == EMSTATE_HM
2617 || enmNewState == EMSTATE_NEM
2618 || enmNewState == EMSTATE_REM
2619 || enmNewState == EMSTATE_IEM_THEN_REM
2620 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2621 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2622 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2623 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2624 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2625 {
2626 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2627 {
2628 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2629 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2630 }
2631 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2632 {
2633 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2634 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2635 }
2636 }
2637 }
2638 else
2639 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2640
2641 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2642 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2643
2644 /*
2645 * Act on the new state.
2646 */
2647 switch (enmNewState)
2648 {
2649 /*
2650 * Execute raw.
2651 */
2652 case EMSTATE_RAW:
2653 AssertLogRelMsgFailed(("%Rrc\n", rc));
2654 rc = VERR_EM_INTERNAL_ERROR;
2655 break;
2656
2657 /*
2658 * Execute hardware accelerated raw.
2659 */
2660 case EMSTATE_HM:
2661 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2662 break;
2663
2664 /*
2665 * Execute using the native execution manager (NEM).
2666 */
2667 case EMSTATE_NEM:
2668 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2669 break;
2670
2671 /*
2672 * Execute recompiled.
2673 */
2674 case EMSTATE_REM:
2675 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2676 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2677 break;
2678
2679 /*
2680 * Execute in the interpreter.
2681 */
2682 case EMSTATE_IEM:
2683 {
2684 uint32_t cInstructions = 0;
2685#if 0 /* For testing purposes. */
2686 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2687 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2688 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2689 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2690 rc = VINF_SUCCESS;
2691 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2692#endif
2693 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2694 if (pVM->em.s.fIemExecutesAll)
2695 {
2696 Assert(rc != VINF_EM_RESCHEDULE_REM);
2697 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2698 Assert(rc != VINF_EM_RESCHEDULE_HM);
2699#ifdef VBOX_HIGH_RES_TIMERS_HACK
2700 if (cInstructions < 2048)
2701 TMTimerPollVoid(pVM, pVCpu);
2702#endif
2703 }
2704 fFFDone = false;
2705 break;
2706 }
2707
2708 /*
2709 * Execute in IEM, hoping we can quickly switch back to HM
2710 * or RAW execution. If our hopes fail, we go to REM.
2711 */
2712 case EMSTATE_IEM_THEN_REM:
2713 {
2714 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2715 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2716 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2717 break;
2718 }
2719
2720 /*
2721 * Application processor execution halted until SIPI.
2722 */
2723 case EMSTATE_WAIT_SIPI:
2724 /* no break */
2725 /*
2726 * hlt - execution halted until interrupt.
2727 */
2728 case EMSTATE_HALTED:
2729 {
2730 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2731 /* If HM (or someone else) stores a pending interrupt in
2732 TRPM, it must be dispatched ASAP without any halting.
2733 Anything pending in TRPM has been accepted and the CPU
2734 should already be in the right state to receive it. */
2735 if (TRPMHasTrap(pVCpu))
2736 rc = VINF_EM_RESCHEDULE;
2737 /* MWAIT has a special extension where it's woken up when
2738 an interrupt is pending even when IF=0. */
2739 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2740 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2741 {
2742 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2743 if (rc == VINF_SUCCESS)
2744 {
2745 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2746 APICUpdatePendingInterrupts(pVCpu);
2747
2748 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2749 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2750 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2751 {
2752 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2753 rc = VINF_EM_RESCHEDULE;
2754 }
2755 }
2756 }
2757 else
2758 {
2759 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2760 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2761 check VMCPU_FF_UPDATE_APIC here. */
2762 if ( rc == VINF_SUCCESS
2763 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2764 {
2765 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2766 rc = VINF_EM_RESCHEDULE;
2767 }
2768 }
2769
2770 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2771 break;
2772 }
2773
2774 /*
2775 * Suspended - return to VM.cpp.
2776 */
2777 case EMSTATE_SUSPENDED:
2778 TMR3NotifySuspend(pVM, pVCpu);
2779 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2780 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2781 return VINF_EM_SUSPEND;
2782
2783 /*
2784 * Debugging in the guest.
2785 */
2786 case EMSTATE_DEBUG_GUEST_RAW:
2787 case EMSTATE_DEBUG_GUEST_HM:
2788 case EMSTATE_DEBUG_GUEST_NEM:
2789 case EMSTATE_DEBUG_GUEST_IEM:
2790 case EMSTATE_DEBUG_GUEST_REM:
2791 TMR3NotifySuspend(pVM, pVCpu);
2792 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2793 TMR3NotifyResume(pVM, pVCpu);
2794 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2795 break;
2796
2797 /*
2798 * Debugging in the hypervisor.
2799 */
2800 case EMSTATE_DEBUG_HYPER:
2801 {
2802 TMR3NotifySuspend(pVM, pVCpu);
2803 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2804
2805 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2806 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2807 if (rc != VINF_SUCCESS)
2808 {
2809 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2810 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2811 else
2812 {
2813 /* switch to guru meditation mode */
2814 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2815 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2816 VMMR3FatalDump(pVM, pVCpu, rc);
2817 }
2818 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2819 return rc;
2820 }
2821
2822 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2823 TMR3NotifyResume(pVM, pVCpu);
2824 break;
2825 }
2826
2827 /*
2828 * Guru meditation takes place in the debugger.
2829 */
2830 case EMSTATE_GURU_MEDITATION:
2831 {
2832 TMR3NotifySuspend(pVM, pVCpu);
2833 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2834 VMMR3FatalDump(pVM, pVCpu, rc);
2835 emR3Debug(pVM, pVCpu, rc);
2836 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2837 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2838 return rc;
2839 }
2840
2841 /*
2842 * The states we don't expect here.
2843 */
2844 case EMSTATE_NONE:
2845 case EMSTATE_TERMINATING:
2846 default:
2847 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2848 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2849 TMR3NotifySuspend(pVM, pVCpu);
2850 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2851 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2852 return VERR_EM_INTERNAL_ERROR;
2853 }
2854 } /* The Outer Main Loop */
2855 }
2856 else
2857 {
2858 /*
2859 * Fatal error.
2860 */
2861 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2862 TMR3NotifySuspend(pVM, pVCpu);
2863 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2864 VMMR3FatalDump(pVM, pVCpu, rc);
2865 emR3Debug(pVM, pVCpu, rc);
2866 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2867 /** @todo change the VM state! */
2868 return rc;
2869 }
2870
2871 /* not reached */
2872}
2873