VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp @ 81071

Last change on this file since 81071 was 80815, checked in by vboxsync, 5 years ago

VMM: "guest hypervisor" -> "nested hypervisor".

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 122.0 KB
1/* $Id: EM.cpp 80815 2019-09-16 09:22:23Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
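/*
 * Illustrative sketch only (see EMR3ExecuteVM() further down for the real
 * thing): the outer loop essentially dispatches on the per-VCPU EMSTATE and
 * runs the matching inner loop until a status code or forced action requires
 * rescheduling.  Roughly, omitting forced actions, halted/suspended handling
 * and debugging:
 *
 * @code
 *      for (;;)
 *      {
 *          bool fFFDone = false;
 *          switch (pVCpu->em.s.enmState)
 *          {
 *              case EMSTATE_HM:            rc = emR3HmExecute(pVM, pVCpu, &fFFDone); break;
 *              case EMSTATE_REM:           rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *              case EMSTATE_IEM_THEN_REM:  rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone)); break;
 *              default: break;
 *          }
 *          pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
 *      }
 * @endcode
 */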
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/vmm.h>
42#include <VBox/vmm/selm.h>
43#include <VBox/vmm/trpm.h>
44#include <VBox/vmm/iem.h>
45#include <VBox/vmm/nem.h>
46#include <VBox/vmm/iom.h>
47#include <VBox/vmm/dbgf.h>
48#include <VBox/vmm/pgm.h>
49#ifdef VBOX_WITH_REM
50# include <VBox/vmm/rem.h>
51#endif
52#include <VBox/vmm/apic.h>
53#include <VBox/vmm/tm.h>
54#include <VBox/vmm/mm.h>
55#include <VBox/vmm/ssm.h>
56#include <VBox/vmm/pdmapi.h>
57#include <VBox/vmm/pdmcritsect.h>
58#include <VBox/vmm/pdmqueue.h>
59#include <VBox/vmm/hm.h>
60#include "EMInternal.h"
61#include <VBox/vmm/vm.h>
62#include <VBox/vmm/uvm.h>
63#include <VBox/vmm/cpumdis.h>
64#include <VBox/dis.h>
65#include <VBox/disopcode.h>
66#include <VBox/err.h>
67#include "VMMTracing.h"
68
69#include <iprt/asm.h>
70#include <iprt/string.h>
71#include <iprt/stream.h>
72#include <iprt/thread.h>
73
74
75/*********************************************************************************************************************************
76* Internal Functions *
77*********************************************************************************************************************************/
78static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
79static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
80#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
81static const char *emR3GetStateName(EMSTATE enmState);
82#endif
83static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
84#if defined(VBOX_WITH_REM) || defined(DEBUG)
85static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
86#endif
87static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
88
89
90/**
91 * Initializes the EM.
92 *
93 * @returns VBox status code.
94 * @param pVM The cross context VM structure.
95 */
96VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
97{
98 LogFlow(("EMR3Init\n"));
99 /*
100 * Assert alignment and sizes.
101 */
102 AssertCompileMemberAlignment(VM, em.s, 32);
103 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
104 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s.u.FatalLongJump) <= RT_SIZEOFMEMB(VMCPU, em.s.u.achPaddingFatalLongJump));
105 AssertCompile(RT_SIZEOFMEMB(VMCPU, em.s) <= RT_SIZEOFMEMB(VMCPU, em.padding));
106
107 /*
108 * Init the structure.
109 */
110 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
111 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
112
113 int rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
114 AssertLogRelRCReturn(rc, rc);
115
116 bool fEnabled;
117 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
118 AssertLogRelRCReturn(rc, rc);
119 pVM->em.s.fGuruOnTripleFault = !fEnabled;
120 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
121 {
122 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
123 pVM->em.s.fGuruOnTripleFault = true;
124 }
125
126 LogRel(("EMR3Init: fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n", pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
127
128 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
129 * Whether to try correlate exit history in any context, detect hot spots and
130 * try optimize these using IEM if there are other exits close by. This
131 * overrides the context specific settings. */
132 bool fExitOptimizationEnabled = true;
133 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
134 AssertLogRelRCReturn(rc, rc);
135
136 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
137 * Whether to optimize exits in ring-0. Setting this to false will also disable
138 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
139 * capabilities of the host kernel, this optimization may be unavailable. */
140 bool fExitOptimizationEnabledR0 = true;
141 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
142 AssertLogRelRCReturn(rc, rc);
143 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
144
145 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
146 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
147 * hooks are in effect). */
148 /** @todo change the default to true here */
149 bool fExitOptimizationEnabledR0PreemptDisabled = true;
150 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
151 AssertLogRelRCReturn(rc, rc);
152 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
153
154 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
155 * Maximum number of instructions to let EMHistoryExec execute in one go. */
156 uint16_t cHistoryExecMaxInstructions = 8192;
157 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
158 AssertLogRelRCReturn(rc, rc);
159 if (cHistoryExecMaxInstructions < 16)
160 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
161
162 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
163 * Maximum number of instructions between exits during probing. */
164 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
165#ifdef RT_OS_WINDOWS
166 if (VM_IS_NEM_ENABLED(pVM))
167 cHistoryProbeMaxInstructionsWithoutExit = 32;
168#endif
169 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
170 cHistoryProbeMaxInstructionsWithoutExit);
171 AssertLogRelRCReturn(rc, rc);
172 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
173 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
174 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 16");
175
176 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
177 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
178 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
179 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
180 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
181 cHistoryProbeMinInstructions);
182 AssertLogRelRCReturn(rc, rc);
183
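    /*
     * Illustrative only: these /EM values live under the VM's CFGM tree and are
     * normally injected by the API layer (e.g. via the VBoxInternal/EM/ extradata
     * path) before EMR3Init runs.  A minimal programmatic sketch, assuming the
     * "EM" child node has already been created under the CFGM root:
     *
     * @code
     *      PCFGMNODE pEM = CFGMR3GetChild(CFGMR3GetRoot(pVM), "EM");
     *      CFGMR3InsertInteger(pEM, "ExitOptimizationEnabled", 1);
     *      CFGMR3InsertInteger(pEM, "HistoryExecMaxInstructions", 4096);
     * @endcode
     */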
184 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
185 {
186 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
187 pVCpu->em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
188 pVCpu->em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
189 pVCpu->em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
190 pVCpu->em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
191 pVCpu->em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
192 pVCpu->em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
193 }
194
195#ifdef VBOX_WITH_REM
196 /*
197 * Initialize the REM critical section.
198 */
199 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
200 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
201 AssertRCReturn(rc, rc);
202#endif
203
204 /*
205 * Saved state.
206 */
207 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
208 NULL, NULL, NULL,
209 NULL, emR3Save, NULL,
210 NULL, emR3Load, NULL);
211 if (RT_FAILURE(rc))
212 return rc;
213
214 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
215 {
216 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
217
218 pVCpu->em.s.enmState = idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
219 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
220 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
221 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
222
223# define EM_REG_COUNTER(a, b, c) \
224 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, idCpu); \
225 AssertRC(rc);
226
227# define EM_REG_COUNTER_USED(a, b, c) \
228 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, idCpu); \
229 AssertRC(rc);
230
231# define EM_REG_PROFILE(a, b, c) \
232 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
233 AssertRC(rc);
234
235# define EM_REG_PROFILE_ADV(a, b, c) \
236 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, idCpu); \
237 AssertRC(rc);
238
239 /*
240 * Statistics.
241 */
242#ifdef VBOX_WITH_STATISTICS
243 PEMSTATS pStats;
244 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
245 if (RT_FAILURE(rc))
246 return rc;
247
248 pVCpu->em.s.pStatsR3 = pStats;
249 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
250
251# if 1 /* rawmode only? */
252 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%u/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
253 EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%u/R3/PrivInst/IoIem", "I/O instructions end to IEM in ring-3.");
254 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%u/R3/PrivInst/Cli", "Number of cli instructions.");
255 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%u/R3/PrivInst/Sti", "Number of sli instructions.");
256 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%u/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
257 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%u/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
258 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%u/R3/PrivInst/Misc", "Number of misc. instructions.");
259 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%u/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
260 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%u/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
261 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%u/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
262 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%u/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
263 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%u/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
264 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%u/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
265 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%u/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
266 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%u/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
267 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%u/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
268 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%u/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
269 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%u/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
270 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%u/R3/PrivInst/Iret", "Number of iret instructions.");
271 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%u/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
272 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%u/R3/PrivInst/Lidt", "Number of lidt instructions.");
273 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%u/R3/PrivInst/Lldt", "Number of lldt instructions.");
274 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%u/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
275 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%u/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
276 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%u/R3/PrivInst/Syscall", "Number of syscall instructions.");
277 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%u/R3/PrivInst/Sysret", "Number of sysret instructions.");
278 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%u/Cli/Total", "Total number of cli instructions executed.");
279#endif
280 pVCpu->em.s.pCliStatTree = 0;
281
282 /* these should be considered for release statistics. */
283 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%u/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
284 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%u/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
285 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%u/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
286 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%u/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
287 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%u/EM/HMExecuteCalled", "Number of times enmR3HMExecute is called.");
288 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%u/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
289 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%u/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
290 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%u/EM/NEMEnter", "Profiling NEM entry overhead.");
291#endif /* VBOX_WITH_STATISTICS */
292 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%u/EM/NEMExec", "Profiling NEM execution.");
293 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%u/EM/NEMExecuteCalled", "Number of times enmR3NEMExecute is called.");
294#ifdef VBOX_WITH_STATISTICS
295 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%u/EM/REMEmuSingle", "Profiling single instruction REM execution.");
296 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%u/EM/REMExec", "Profiling REM execution.");
297 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%u/EM/REMSync", "Profiling REM context syncing.");
298 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%u/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
299 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%u/EM/RAWExec", "Profiling Raw Mode execution.");
300 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%u/EM/RAWTail", "Profiling Raw Mode tail overhead.");
301#endif /* VBOX_WITH_STATISTICS */
302
303 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%u/EM/ForcedActions", "Profiling forced action execution.");
304 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%u/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
305 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%u/EM/Capped", "Profiling capped state (sleep).");
306 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%u/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
307 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%u/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
308
309 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%u/EM/Total", "Profiling EMR3ExecuteVM.");
310
311 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
312 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", idCpu);
313 AssertRC(rc);
314
315 /* History record statistics */
316 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
317 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", idCpu);
318 AssertRC(rc);
319
320 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
321 {
322 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
323 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", idCpu, iStep);
324 AssertRC(rc);
325 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
326 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", idCpu, iStep);
327 AssertRC(rc);
328 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
329 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", idCpu, iStep);
330 AssertRC(rc);
331 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
332 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", idCpu, iStep);
333 AssertRC(rc);
334 }
335
336 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%u/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
337 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%u/ExitOpt/ExecSavedExit", "Net number of saved exits.");
338 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%u/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
339 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%u/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
340 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%u/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
341 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%u/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
342 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%u/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
343 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%u/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
344 }
345
346 emR3InitDbg(pVM);
347 return VINF_SUCCESS;
348}
349
350
351/**
352 * Called when a VM initialization stage is completed.
353 *
354 * @returns VBox status code.
355 * @param pVM The cross context VM structure.
356 * @param enmWhat The initialization state that was completed.
357 */
358VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
359{
360 if (enmWhat == VMINITCOMPLETED_RING0)
361 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
362 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabled, pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0,
363 pVM->apCpusR3[0]->em.s.fExitOptimizationEnabledR0PreemptDisabled));
364 return VINF_SUCCESS;
365}
366
367
368/**
369 * Applies relocations to data and code managed by this
370 * component. This function will be called at init and
371 * whenever the VMM needs to relocate itself inside the GC.
372 *
373 * @param pVM The cross context VM structure.
374 */
375VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
376{
377 LogFlow(("EMR3Relocate\n"));
378 RT_NOREF(pVM);
379}
380
381
382/**
383 * Reset the EM state for a CPU.
384 *
385 * Called by EMR3Reset and hot plugging.
386 *
387 * @param pVCpu The cross context virtual CPU structure.
388 */
389VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
390{
391 /* Reset scheduling state. */
392 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
393
394 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
395 out of the HALTED state here so that enmPrevState doesn't end up as
396 HALTED when EMR3ExecuteVM returns. */
397 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
398 {
399 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
400 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
401 }
402}
403
404
405/**
406 * Reset notification.
407 *
408 * @param pVM The cross context VM structure.
409 */
410VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
411{
412 Log(("EMR3Reset: \n"));
413 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
414 EMR3ResetCpu(pVM->apCpusR3[idCpu]);
415}
416
417
418/**
419 * Terminates the EM.
420 *
421 * Termination means cleaning up and freeing all resources;
422 * the VM itself is at this point powered off or suspended.
423 *
424 * @returns VBox status code.
425 * @param pVM The cross context VM structure.
426 */
427VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
428{
429#ifdef VBOX_WITH_REM
430 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
431#else
432 RT_NOREF(pVM);
433#endif
434 return VINF_SUCCESS;
435}
436
437
438/**
439 * Execute state save operation.
440 *
441 * @returns VBox status code.
442 * @param pVM The cross context VM structure.
443 * @param pSSM SSM operation handle.
444 */
445static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
446{
447 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
448 {
449 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
450
451 SSMR3PutBool(pSSM, false /*fForceRAW*/);
452
453 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
454 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
455 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
456
457 /* Save mwait state. */
458 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
459 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
460 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
461 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
462 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
463 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
464 AssertRCReturn(rc, rc);
465 }
466 return VINF_SUCCESS;
467}
468
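/*
 * Saved state layout note (descriptive only): per VCPU the unit consists of a
 * legacy bool (the old fForceRAW flag, always written as false), the previous
 * EM state as a uint32_t (for versions newer than EM_SAVED_STATE_VERSION_PRE_SMP),
 * and the six MONITOR/MWAIT fields (for versions newer than
 * EM_SAVED_STATE_VERSION_PRE_MWAIT); emR3Load below consumes it accordingly.
 */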
469
470/**
471 * Execute state load operation.
472 *
473 * @returns VBox status code.
474 * @param pVM The cross context VM structure.
475 * @param pSSM SSM operation handle.
476 * @param uVersion Data layout version.
477 * @param uPass The data pass.
478 */
479static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
480{
481 /*
482 * Validate version.
483 */
484 if ( uVersion > EM_SAVED_STATE_VERSION
485 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
486 {
487 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
488 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
489 }
490 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
491
492 /*
493 * Load the saved state.
494 */
495 for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
496 {
497 PVMCPU pVCpu = pVM->apCpusR3[idCpu];
498
499 bool fForceRAWIgnored;
500 int rc = SSMR3GetBool(pSSM, &fForceRAWIgnored);
501 AssertRCReturn(rc, rc);
502
503 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
504 {
505 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
506 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
507 AssertRCReturn(rc, rc);
508 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
509
510 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
511 }
512 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
513 {
514 /* Load mwait state. */
515 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
516 AssertRCReturn(rc, rc);
517 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
518 AssertRCReturn(rc, rc);
519 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
520 AssertRCReturn(rc, rc);
521 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
522 AssertRCReturn(rc, rc);
523 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
524 AssertRCReturn(rc, rc);
525 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
526 AssertRCReturn(rc, rc);
527 }
528
529 Assert(!pVCpu->em.s.pCliStatTree);
530 }
531 return VINF_SUCCESS;
532}
533
534
535/**
536 * Argument packet for emR3SetExecutionPolicy.
537 */
538struct EMR3SETEXECPOLICYARGS
539{
540 EMEXECPOLICY enmPolicy;
541 bool fEnforce;
542};
543
544
545/**
546 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
547 */
548static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
549{
550 /*
551 * Only the first CPU changes the variables.
552 */
553 if (pVCpu->idCpu == 0)
554 {
555 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
556 switch (pArgs->enmPolicy)
557 {
558 case EMEXECPOLICY_RECOMPILE_RING0:
559 case EMEXECPOLICY_RECOMPILE_RING3:
560 break;
561 case EMEXECPOLICY_IEM_ALL:
562 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
563 break;
564 default:
565 AssertFailedReturn(VERR_INVALID_PARAMETER);
566 }
567 Log(("EM: Set execution policy (fIemExecutesAll=%RTbool)\n", pVM->em.s.fIemExecutesAll));
568 }
569
570 /*
571 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
572 */
573 return pVCpu->em.s.enmState == EMSTATE_RAW
574 || pVCpu->em.s.enmState == EMSTATE_HM
575 || pVCpu->em.s.enmState == EMSTATE_NEM
576 || pVCpu->em.s.enmState == EMSTATE_IEM
577 || pVCpu->em.s.enmState == EMSTATE_REM
578 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
579 ? VINF_EM_RESCHEDULE
580 : VINF_SUCCESS;
581}
582
583
584/**
585 * Changes an execution scheduling policy parameter.
586 *
587 * This is used to enable or disable raw-mode / hardware-virtualization
588 * execution of user and supervisor code.
589 *
590 * @returns VINF_SUCCESS on success.
591 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
592 * @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
593 *
594 * @param pUVM The user mode VM handle.
595 * @param enmPolicy The scheduling policy to change.
596 * @param fEnforce Whether to enforce the policy or not.
597 */
598VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
599{
600 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
601 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
602 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
603
604 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
605 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
606}
607
608
609/**
610 * Queries an execution scheduling policy parameter.
611 *
612 * @returns VBox status code
613 * @param pUVM The user mode VM handle.
614 * @param enmPolicy The scheduling policy to query.
615 * @param pfEnforced Where to return the current value.
616 */
617VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
618{
619 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
620 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
621 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
622 PVM pVM = pUVM->pVM;
623 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
624
625 /* No need to bother EMTs with a query. */
626 switch (enmPolicy)
627 {
628 case EMEXECPOLICY_RECOMPILE_RING0:
629 case EMEXECPOLICY_RECOMPILE_RING3:
630 *pfEnforced = false;
631 break;
632 case EMEXECPOLICY_IEM_ALL:
633 *pfEnforced = pVM->em.s.fIemExecutesAll;
634 break;
635 default:
636 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
637 }
638
639 return VINF_SUCCESS;
640}
641
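/*
 * Illustrative usage sketch (for external callers, not used in this file):
 * forcing all guest code through the interpreter and reading the setting back:
 *
 * @code
 *      int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *      if (RT_SUCCESS(rc))
 *      {
 *          bool fIemAll = false;
 *          rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fIemAll);
 *      }
 * @endcode
 */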
642
643/**
644 * Queries the main execution engine of the VM.
645 *
646 * @returns VBox status code
647 * @param pUVM The user mode VM handle.
648 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
649 */
650VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
651{
652 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
653 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
654
655 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
656 PVM pVM = pUVM->pVM;
657 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
658
659 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
660 return VINF_SUCCESS;
661}
662
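/*
 * Illustrative usage sketch: checking which main execution engine was selected.
 * The VM_EXEC_ENGINE_HW_VIRT and VM_EXEC_ENGINE_NATIVE_API names below are
 * assumed from vm.h:
 *
 * @code
 *      uint8_t bEngine = VM_EXEC_ENGINE_NOT_SET;
 *      if (RT_SUCCESS(EMR3QueryMainExecutionEngine(pUVM, &bEngine)))
 *          LogRel(("Main execution engine: %u (HM=%u, NEM=%u)\n",
 *                  bEngine, VM_EXEC_ENGINE_HW_VIRT, VM_EXEC_ENGINE_NATIVE_API));
 * @endcode
 */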
663
664/**
665 * Raise a fatal error.
666 *
667 * Safely terminate the VM with full state report and stuff. This function
668 * will naturally never return.
669 *
670 * @param pVCpu The cross context virtual CPU structure.
671 * @param rc VBox status code.
672 */
673VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
674{
675 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
676 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
677}
678
679
680#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
681/**
682 * Gets the EM state name.
683 *
684 * @returns Pointer to read-only state name.
685 * @param enmState The state.
686 */
687static const char *emR3GetStateName(EMSTATE enmState)
688{
689 switch (enmState)
690 {
691 case EMSTATE_NONE: return "EMSTATE_NONE";
692 case EMSTATE_RAW: return "EMSTATE_RAW";
693 case EMSTATE_HM: return "EMSTATE_HM";
694 case EMSTATE_IEM: return "EMSTATE_IEM";
695 case EMSTATE_REM: return "EMSTATE_REM";
696 case EMSTATE_HALTED: return "EMSTATE_HALTED";
697 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
698 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
699 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
700 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
701 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
702 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
703 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
704 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
705 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
706 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
707 case EMSTATE_NEM: return "EMSTATE_NEM";
708 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
709 default: return "Unknown!";
710 }
711}
712#endif /* LOG_ENABLED || VBOX_STRICT */
713
714
715/**
716 * Handle pending ring-3 I/O port write.
717 *
718 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
719 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
720 *
721 * @returns Strict VBox status code.
722 * @param pVM The cross context VM structure.
723 * @param pVCpu The cross context virtual CPU structure.
724 */
725VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
726{
727 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
728
729 /* Get and clear the pending data. */
730 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
731 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
732 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
733 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
734 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
735
736 /* Assert sanity. */
737 switch (cbValue)
738 {
739 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
740 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
741 case 4: break;
742 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
743 }
744 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
745
746 /* Do the work.*/
747 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
748 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
749 if (IOM_SUCCESS(rcStrict))
750 {
751 pVCpu->cpum.GstCtx.rip += cbInstr;
752 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
753 }
754 return rcStrict;
755}
756
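/*
 * Note on the handshake (descriptive only): ring-0 / raw-mode code records the
 * access in pVCpu->em.s.PendingIoPortAccess (uPort, uValue, cbValue, cbInstr)
 * and returns VINF_EM_PENDING_R3_IOPORT_WRITE or VINF_EM_PENDING_R3_IOPORT_READ;
 * the ring-3 loop then calls the matching handler here, which performs the
 * access via IOM and, on success, advances RIP by cbInstr and clears RFLAGS.RF.
 */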
757
758/**
759 * Handle pending ring-3 I/O port read.
760 *
761 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
762 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
763 *
764 * @returns Strict VBox status code.
765 * @param pVM The cross context VM structure.
766 * @param pVCpu The cross context virtual CPU structure.
767 */
768VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
769{
770 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
771
772 /* Get and clear the pending data. */
773 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
774 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
775 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
776 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
777
778 /* Assert sanity. */
779 switch (cbValue)
780 {
781 case 1: break;
782 case 2: break;
783 case 4: break;
784 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
785 }
786 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
787 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
788
789 /* Do the work.*/
790 uint32_t uValue = 0;
791 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
792 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
793 if (IOM_SUCCESS(rcStrict))
794 {
795 if (cbValue == 4)
796 pVCpu->cpum.GstCtx.rax = uValue;
797 else if (cbValue == 2)
798 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
799 else
800 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
801 pVCpu->cpum.GstCtx.rip += cbInstr;
802 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
803 }
804 return rcStrict;
805}
806
807
808/**
809 * Debug loop.
810 *
811 * @returns VBox status code for EM.
812 * @param pVM The cross context VM structure.
813 * @param pVCpu The cross context virtual CPU structure.
814 * @param rc Current EM VBox status code.
815 */
816static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
817{
818 for (;;)
819 {
820 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
821 const VBOXSTRICTRC rcLast = rc;
822
823 /*
824 * Debug related RC.
825 */
826 switch (VBOXSTRICTRC_VAL(rc))
827 {
828 /*
829 * Single step an instruction.
830 */
831 case VINF_EM_DBG_STEP:
832 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
833 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
834 AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
835 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
836 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
837 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
838 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
839#ifdef VBOX_WITH_REM
840 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
841 rc = emR3RemStep(pVM, pVCpu);
842#endif
843 else
844 {
845 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
846 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
847 rc = VINF_EM_DBG_STEPPED;
848 }
849 break;
850
851 /*
852 * Simple events: stepped, breakpoint, stop/assertion.
853 */
854 case VINF_EM_DBG_STEPPED:
855 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
856 break;
857
858 case VINF_EM_DBG_BREAKPOINT:
859 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
860 break;
861
862 case VINF_EM_DBG_STOP:
863 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
864 break;
865
866 case VINF_EM_DBG_EVENT:
867 rc = DBGFR3EventHandlePending(pVM, pVCpu);
868 break;
869
870 case VINF_EM_DBG_HYPER_STEPPED:
871 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
872 break;
873
874 case VINF_EM_DBG_HYPER_BREAKPOINT:
875 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
876 break;
877
878 case VINF_EM_DBG_HYPER_ASSERTION:
879 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
880 RTLogFlush(NULL);
881 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
882 break;
883
884 /*
885 * Guru meditation.
886 */
887 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
888 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
889 break;
890 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
891 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
892 break;
893 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
894 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
895 break;
896
897 default: /** @todo don't use default for guru, but make special errors code! */
898 {
899 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
900 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
901 break;
902 }
903 }
904
905 /*
906 * Process the result.
907 */
908 switch (VBOXSTRICTRC_VAL(rc))
909 {
910 /*
911 * Continue the debugging loop.
912 */
913 case VINF_EM_DBG_STEP:
914 case VINF_EM_DBG_STOP:
915 case VINF_EM_DBG_EVENT:
916 case VINF_EM_DBG_STEPPED:
917 case VINF_EM_DBG_BREAKPOINT:
918 case VINF_EM_DBG_HYPER_STEPPED:
919 case VINF_EM_DBG_HYPER_BREAKPOINT:
920 case VINF_EM_DBG_HYPER_ASSERTION:
921 break;
922
923 /*
924 * Resuming execution (in some form) has to be done here if we got
925 * a hypervisor debug event.
926 */
927 case VINF_SUCCESS:
928 case VINF_EM_RESUME:
929 case VINF_EM_SUSPEND:
930 case VINF_EM_RESCHEDULE:
931 case VINF_EM_RESCHEDULE_RAW:
932 case VINF_EM_RESCHEDULE_REM:
933 case VINF_EM_HALT:
934 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
935 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
936 if (rc == VINF_SUCCESS)
937 rc = VINF_EM_RESCHEDULE;
938 return rc;
939
940 /*
941 * The debugger isn't attached.
942 * We'll simply turn the thing off since that's the easiest thing to do.
943 */
944 case VERR_DBGF_NOT_ATTACHED:
945 switch (VBOXSTRICTRC_VAL(rcLast))
946 {
947 case VINF_EM_DBG_HYPER_STEPPED:
948 case VINF_EM_DBG_HYPER_BREAKPOINT:
949 case VINF_EM_DBG_HYPER_ASSERTION:
950 case VERR_TRPM_PANIC:
951 case VERR_TRPM_DONT_PANIC:
952 case VERR_VMM_RING0_ASSERTION:
953 case VERR_VMM_HYPER_CR3_MISMATCH:
954 case VERR_VMM_RING3_CALL_DISABLED:
955 return rcLast;
956 }
957 return VINF_EM_OFF;
958
959 /*
960 * Status codes terminating the VM in one or another sense.
961 */
962 case VINF_EM_TERMINATE:
963 case VINF_EM_OFF:
964 case VINF_EM_RESET:
965 case VINF_EM_NO_MEMORY:
966 case VINF_EM_RAW_STALE_SELECTOR:
967 case VINF_EM_RAW_IRET_TRAP:
968 case VERR_TRPM_PANIC:
969 case VERR_TRPM_DONT_PANIC:
970 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
971 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
972 case VERR_VMM_RING0_ASSERTION:
973 case VERR_VMM_HYPER_CR3_MISMATCH:
974 case VERR_VMM_RING3_CALL_DISABLED:
975 case VERR_INTERNAL_ERROR:
976 case VERR_INTERNAL_ERROR_2:
977 case VERR_INTERNAL_ERROR_3:
978 case VERR_INTERNAL_ERROR_4:
979 case VERR_INTERNAL_ERROR_5:
980 case VERR_IPE_UNEXPECTED_STATUS:
981 case VERR_IPE_UNEXPECTED_INFO_STATUS:
982 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
983 return rc;
984
985 /*
986 * The rest is unexpected, and will keep us here.
987 */
988 default:
989 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
990 break;
991 }
992 } /* debug for ever */
993}
994
995
996#if defined(VBOX_WITH_REM) || defined(DEBUG)
997/**
998 * Steps recompiled code.
999 *
1000 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1001 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1002 *
1003 * @param pVM The cross context VM structure.
1004 * @param pVCpu The cross context virtual CPU structure.
1005 */
1006static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1007{
1008 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1009
1010# ifdef VBOX_WITH_REM
1011 EMRemLock(pVM);
1012
1013 /*
1014 * Switch to REM, step instruction, switch back.
1015 */
1016 int rc = REMR3State(pVM, pVCpu);
1017 if (RT_SUCCESS(rc))
1018 {
1019 rc = REMR3Step(pVM, pVCpu);
1020 REMR3StateBack(pVM, pVCpu);
1021 }
1022 EMRemUnlock(pVM);
1023
1024# else
1025 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1026# endif
1027
1028 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1029 return rc;
1030}
1031#endif /* VBOX_WITH_REM || DEBUG */
1032
1033
1034#ifdef VBOX_WITH_REM
1035/**
1036 * emR3RemExecute helper that syncs the state back from REM and leave the REM
1037 * critical section.
1038 *
1039 * @returns false - new fInREMState value.
1040 * @param pVM The cross context VM structure.
1041 * @param pVCpu The cross context virtual CPU structure.
1042 */
1043DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1044{
1045 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1046 REMR3StateBack(pVM, pVCpu);
1047 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1048
1049 EMRemUnlock(pVM);
1050 return false;
1051}
1052#endif
1053
1054
1055/**
1056 * Executes recompiled code.
1057 *
1058 * This function contains the recompiler version of the inner
1059 * execution loop (the outer loop being in EMR3ExecuteVM()).
1060 *
1061 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1062 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1063 *
1064 * @param pVM The cross context VM structure.
1065 * @param pVCpu The cross context virtual CPU structure.
1066 * @param pfFFDone Where to store an indicator telling whether or not
1067 * FFs were done before returning.
1068 *
1069 */
1070static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1071{
1072#ifdef LOG_ENABLED
1073 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1074
1075 if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1076 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1077 else
1078 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1079#endif
1080 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1081
1082#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1083 AssertMsg( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1084 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1085 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1086#endif
1087
1088 /*
1089 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1090 * or the REM suggests raw-mode execution.
1091 */
1092 *pfFFDone = false;
1093#ifdef VBOX_WITH_REM
1094 bool fInREMState = false;
1095#else
1096 uint32_t cLoops = 0;
1097#endif
1098 int rc = VINF_SUCCESS;
1099 for (;;)
1100 {
1101#ifdef VBOX_WITH_REM
1102 /*
1103 * Lock REM and update the state if not already in sync.
1104 *
1105 * Note! Big lock, but you are not supposed to own any lock when
1106 * coming in here.
1107 */
1108 if (!fInREMState)
1109 {
1110 EMRemLock(pVM);
1111 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1112
1113 /* Flush the recompiler translation blocks if the VCPU has changed,
1114 also force a full CPU state resync. */
1115 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1116 {
1117 REMFlushTBs(pVM);
1118 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1119 }
1120 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1121
1122 rc = REMR3State(pVM, pVCpu);
1123
1124 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1125 if (RT_FAILURE(rc))
1126 break;
1127 fInREMState = true;
1128
1129 /*
1130 * We might have missed the raising of VMREQ, TIMER and some other
1131 * important FFs while we were busy switching the state. So, check again.
1132 */
1133 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1134 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1135 {
1136 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1137 goto l_REMDoForcedActions;
1138 }
1139 }
1140#endif
1141
1142 /*
1143 * Execute REM.
1144 */
1145 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1146 {
1147 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1148#ifdef VBOX_WITH_REM
1149 rc = REMR3Run(pVM, pVCpu);
1150#else
1151 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/));
1152#endif
1153 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1154 }
1155 else
1156 {
1157 /* Give up this time slice; virtual time continues */
1158 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1159 RTThreadSleep(5);
1160 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1161 rc = VINF_SUCCESS;
1162 }
1163
1164 /*
1165 * Deal with high priority post execution FFs before doing anything
1166 * else. Sync back the state and leave the lock to be on the safe side.
1167 */
1168 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1169 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1170 {
1171#ifdef VBOX_WITH_REM
1172 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1173#endif
1174 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1175 }
1176
1177 /*
1178 * Process the returned status code.
1179 */
1180 if (rc != VINF_SUCCESS)
1181 {
1182 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1183 break;
1184 if (rc != VINF_REM_INTERRUPED_FF)
1185 {
1186#ifndef VBOX_WITH_REM
1187 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1188 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1189 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1190 {
1191 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1192 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1193 {
1194 rc = VINF_EM_RESCHEDULE;
1195 break;
1196 }
1197 }
1198#endif
1199
1200 /*
1201 * Anything which is not known to us means an internal error
1202 * and the termination of the VM!
1203 */
1204 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1205 break;
1206 }
1207 }
1208
1209
1210 /*
1211 * Check and execute forced actions.
1212 *
1213 * Sync back the VM state and leave the lock before calling any of
1214 * these, you never know what's going to happen here.
1215 */
1216#ifdef VBOX_HIGH_RES_TIMERS_HACK
1217 TMTimerPollVoid(pVM, pVCpu);
1218#endif
1219 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1220 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1221 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK) )
1222 {
1223#ifdef VBOX_WITH_REM
1224l_REMDoForcedActions:
1225 if (fInREMState)
1226 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1227#endif
1228 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1229 rc = emR3ForcedActions(pVM, pVCpu, rc);
1230 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1231 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1232 if ( rc != VINF_SUCCESS
1233 && rc != VINF_EM_RESCHEDULE_REM)
1234 {
1235 *pfFFDone = true;
1236 break;
1237 }
1238 }
1239
1240#ifndef VBOX_WITH_REM
1241 /*
1242 * Have to check if we can get back to fast execution mode every so often.
1243 */
1244 if (!(++cLoops & 7))
1245 {
1246 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
1247 if ( enmCheck != EMSTATE_REM
1248 && enmCheck != EMSTATE_IEM_THEN_REM)
1249 return VINF_EM_RESCHEDULE;
1250 }
1251#endif
1252
1253 } /* The Inner Loop, recompiled execution mode version. */
1254
1255
1256#ifdef VBOX_WITH_REM
1257 /*
1258 * Returning. Sync back the VM state if required.
1259 */
1260 if (fInREMState)
1261 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1262#endif
1263
1264 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1265 return rc;
1266}
1267
1268
1269#ifdef DEBUG
1270
1271int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1272{
1273 EMSTATE enmOldState = pVCpu->em.s.enmState;
1274
1275 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1276
1277 Log(("Single step BEGIN:\n"));
1278 for (uint32_t i = 0; i < cIterations; i++)
1279 {
1280 DBGFR3PrgStep(pVCpu);
1281 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1282 emR3RemStep(pVM, pVCpu);
1283 if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
1284 break;
1285 }
1286 Log(("Single step END:\n"));
1287 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1288 pVCpu->em.s.enmState = enmOldState;
1289 return VINF_EM_RESCHEDULE;
1290}
1291
1292#endif /* DEBUG */
1293
1294
1295/**
1296 * Try to execute the problematic code in IEM first, then fall back on REM if there
1297 * is too much of it or if IEM doesn't implement something.
1298 *
1299 * @returns Strict VBox status code from IEMExecLots.
1300 * @param pVM The cross context VM structure.
1301 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1302 * @param pfFFDone Force flags done indicator.
1303 *
1304 * @thread EMT(pVCpu)
1305 */
1306static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1307{
1308 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1309 *pfFFDone = false;
1310
1311 /*
1312 * Execute in IEM for a while.
1313 */
1314 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1315 {
1316 uint32_t cInstructions;
1317 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 1024 - pVCpu->em.s.cIemThenRemInstructions /*cMaxInstructions*/,
1318 UINT32_MAX/2 /*cPollRate*/, &cInstructions);
1319 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1320 if (rcStrict != VINF_SUCCESS)
1321 {
1322 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1323 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1324 break;
1325
1326 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1327 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1328 return rcStrict;
1329 }
1330
1331 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1332 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1333 {
1334 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1335 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1336 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1337 pVCpu->em.s.enmState = enmNewState;
1338 return VINF_SUCCESS;
1339 }
1340
1341 /*
1342 * Check for pending actions.
1343 */
1344 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1345 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1346 return VINF_SUCCESS;
1347 }
1348
1349 /*
1350 * Switch to REM.
1351 */
1352 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1353 pVCpu->em.s.enmState = EMSTATE_REM;
1354 return VINF_SUCCESS;
1355}
1356
1357
1358/**
1359 * Decides whether to execute RAW, HWACC or REM.
1360 *
1361 * @returns new EM state
1362 * @param pVM The cross context VM structure.
1363 * @param pVCpu The cross context virtual CPU structure.
1364 */
1365EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1366{
1367 /*
1368 * We stay in the wait for SIPI state unless explicitly told otherwise.
1369 */
1370 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1371 return EMSTATE_WAIT_SIPI;
1372
1373 /*
1374 * Execute everything in IEM?
1375 */
1376 if (pVM->em.s.fIemExecutesAll)
1377 return EMSTATE_IEM;
1378
1379 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1380 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1381 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1382
1383 X86EFLAGS EFlags = pVCpu->cpum.GstCtx.eflags;
1384 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1385 {
1386 if (VM_IS_HM_ENABLED(pVM))
1387 {
1388 if (HMCanExecuteGuest(pVM, pVCpu, &pVCpu->cpum.GstCtx))
1389 return EMSTATE_HM;
1390 }
1391 else if (NEMR3CanExecuteGuest(pVM, pVCpu))
1392 return EMSTATE_NEM;
1393
1394 /*
1395 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1396 * turns off monitoring features essential for raw mode!
1397 */
1398 return EMSTATE_IEM_THEN_REM;
1399 }
1400
1401 /*
1402 * Standard raw-mode:
1403 *
1404 * Here we only support 16-bit and 32-bit protected mode ring-3 code that has no I/O privileges,
1405 * or 32-bit protected mode ring-0 code.
1406 *
1407 * The tests are ordered by the likelihood of being true during normal execution.
1408 */
1409 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1410 {
1411 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1412 return EMSTATE_REM;
1413 }
1414
1415# ifndef VBOX_RAW_V86
1416 if (EFlags.u32 & X86_EFL_VM) {
1417 Log2(("raw mode refused: VM_MASK\n"));
1418 return EMSTATE_REM;
1419 }
1420# endif
1421
1422 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1423 uint32_t u32CR0 = pVCpu->cpum.GstCtx.cr0;
1424 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1425 {
1426 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1427 return EMSTATE_REM;
1428 }
1429
1430 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
1431 {
1432 uint32_t u32Dummy, u32Features;
1433
1434 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1435 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1436 return EMSTATE_REM;
1437 }
1438
1439 unsigned uSS = pVCpu->cpum.GstCtx.ss.Sel;
1440 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
1441 || (uSS & X86_SEL_RPL) == 3)
1442 {
1443 if (!(EFlags.u32 & X86_EFL_IF))
1444 {
1445 Log2(("raw mode refused: IF (RawR3)\n"));
1446 return EMSTATE_REM;
1447 }
1448
1449 if (!(u32CR0 & X86_CR0_WP))
1450 {
1451 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1452 return EMSTATE_REM;
1453 }
1454 }
1455 else
1456 {
1457 /* Only ring 0 supervisor code. */
1458 if ((uSS & X86_SEL_RPL) != 0)
1459 {
1460 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1461 return EMSTATE_REM;
1462 }
1463
1464 // Let's start with pure 32 bits ring 0 code first
1465 /** @todo What's pure 32-bit mode? flat? */
1466 if ( !(pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1467 || !(pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
1468 {
1469 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1470 return EMSTATE_REM;
1471 }
1472
1473 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1474 if (!(u32CR0 & X86_CR0_WP))
1475 {
1476 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1477 return EMSTATE_REM;
1478 }
1479
1480# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1481 if (!(EFlags.u32 & X86_EFL_IF))
1482 {
1483 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1484 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1485 return EMSTATE_REM;
1486 }
1487# endif
1488
1489# ifndef VBOX_WITH_RAW_RING1
1490 /** @todo still necessary??? */
1491 if (EFlags.Bits.u2IOPL != 0)
1492 {
1493 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1494 return EMSTATE_REM;
1495 }
1496# endif
1497 }
1498
1499 /*
1500 * Stale hidden selectors means raw-mode is unsafe (being very careful).
1501 */
1502 if (pVCpu->cpum.GstCtx.cs.fFlags & CPUMSELREG_FLAGS_STALE)
1503 {
1504 Log2(("raw mode refused: stale CS\n"));
1505 return EMSTATE_REM;
1506 }
1507 if (pVCpu->cpum.GstCtx.ss.fFlags & CPUMSELREG_FLAGS_STALE)
1508 {
1509 Log2(("raw mode refused: stale SS\n"));
1510 return EMSTATE_REM;
1511 }
1512 if (pVCpu->cpum.GstCtx.ds.fFlags & CPUMSELREG_FLAGS_STALE)
1513 {
1514 Log2(("raw mode refused: stale DS\n"));
1515 return EMSTATE_REM;
1516 }
1517 if (pVCpu->cpum.GstCtx.es.fFlags & CPUMSELREG_FLAGS_STALE)
1518 {
1519 Log2(("raw mode refused: stale ES\n"));
1520 return EMSTATE_REM;
1521 }
1522 if (pVCpu->cpum.GstCtx.fs.fFlags & CPUMSELREG_FLAGS_STALE)
1523 {
1524 Log2(("raw mode refused: stale FS\n"));
1525 return EMSTATE_REM;
1526 }
1527 if (pVCpu->cpum.GstCtx.gs.fFlags & CPUMSELREG_FLAGS_STALE)
1528 {
1529 Log2(("raw mode refused: stale GS\n"));
1530 return EMSTATE_REM;
1531 }
1532
1533# ifdef VBOX_WITH_SAFE_STR
1534 if (pVCpu->cpum.GstCtx.tr.Sel == 0)
1535 {
1536 Log(("Raw mode refused -> TR=0\n"));
1537 return EMSTATE_REM;
1538 }
1539# endif
1540
1541 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1542 return EMSTATE_RAW;
1543}
1544
1545
1546/**
1547 * Executes all high priority post execution force actions.
1548 *
1549 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1550 * fatal error status code.
1551 *
1552 * @param pVM The cross context VM structure.
1553 * @param pVCpu The cross context virtual CPU structure.
1554 * @param rc The current strict VBox status code rc.
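 *
 * @remarks Usage sketch (illustrative only; the real call sites are the inner
 *          execution loops in this file, and the variable name here is made up):
 * @code
 *      rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, rcStrict);
 * @endcode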
1555 */
1556VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1557{
1558 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1559
1560 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1561 PDMCritSectBothFF(pVCpu);
1562
1563 /* Update CR3 (Nested Paging case for HM). */
1564 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1565 {
1566 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1567 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1568 if (RT_FAILURE(rc2))
1569 return rc2;
1570 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1571 }
1572
1573 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1574 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1575 {
1576 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1577 if (CPUMIsGuestInPAEMode(pVCpu))
1578 {
1579 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1580 AssertPtr(pPdpes);
1581
1582 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1583 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1584 }
1585 else
1586 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1587 }
1588
1589 /* IEM has pending work (typically memory write after INS instruction). */
1590 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1591 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1592
1593 /* IOM has pending work (committing an I/O or MMIO write). */
1594 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1595 {
1596 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
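 /* If IOM queued an exit record for continued ring-3 execution (exit history
    optimization), resume it when the status allows; otherwise drop the record. */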
1597 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1598 { /* half likely, or at least it's a line shorter. */ }
1599 else if (rc == VINF_SUCCESS)
1600 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1601 else
1602 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1603 }
1604
1605 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1606 {
1607 if ( rc > VINF_EM_NO_MEMORY
1608 && rc <= VINF_EM_LAST)
1609 rc = VINF_EM_NO_MEMORY;
1610 }
1611
1612 return rc;
1613}
1614
1615
1616/**
1617 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1618 *
1619 * @returns VBox status code.
1620 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1621 * @param pVCpu The cross context virtual CPU structure.
1622 */
1623static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1624{
1625#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1626 /* Handle the "external interrupt" VM-exit intercept. */
1627 if ( CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
1628 && !CPUMIsGuestVmxExitCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
1629 {
1630 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1631 AssertMsg( rcStrict != VINF_PGM_CHANGE_MODE
1632 && rcStrict != VINF_VMX_VMEXIT
1633 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1634 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1635 return VBOXSTRICTRC_TODO(rcStrict);
1636 }
1637#else
1638 RT_NOREF(pVCpu);
1639#endif
1640 return VINF_NO_CHANGE;
1641}
1642
1643
1644/**
1645 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1646 *
1647 * @returns VBox status code.
1648 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1649 * @param pVCpu The cross context virtual CPU structure.
1650 */
1651static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1652{
1653#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1654 /* Handle the physical interrupt intercept (can be masked by the nested hypervisor). */
1655 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1656 {
1657 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1658 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1659 if (RT_SUCCESS(rcStrict))
1660 {
1661 AssertMsg( rcStrict != VINF_PGM_CHANGE_MODE
1662 && rcStrict != VINF_SVM_VMEXIT
1663 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1664 return VBOXSTRICTRC_VAL(rcStrict);
1665 }
1666
1667 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1668 return VINF_EM_TRIPLE_FAULT;
1669 }
1670#else
1671 NOREF(pVCpu);
1672#endif
1673 return VINF_NO_CHANGE;
1674}
1675
1676
1677/**
1678 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1679 *
1680 * @returns VBox status code.
1681 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1682 * @param pVCpu The cross context virtual CPU structure.
1683 */
1684static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1685{
1686#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1687 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1688 {
1689 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1690 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1691 if (RT_SUCCESS(rcStrict))
1692 {
1693 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1694 Assert(rcStrict != VINF_SVM_VMEXIT);
1695 return VBOXSTRICTRC_VAL(rcStrict);
1696 }
1697 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1698 return VINF_EM_TRIPLE_FAULT;
1699 }
1700#else
1701 NOREF(pVCpu);
1702#endif
1703 return VINF_NO_CHANGE;
1704}
1705
1706
1707/**
1708 * Executes all pending forced actions.
1709 *
1710 * Forced actions can cause execution delays and execution
1711 * rescheduling. The first we deal with using action priority, so
1712 * that for instance pending timers aren't scheduled and run until
1713 * right before execution. The rescheduling we deal with using
1714 * return codes. The same goes for VM termination, only in that case
1715 * we exit everything.
1716 *
1717 * @returns VBox status code of equal or greater importance/severity than rc.
1718 * The most important ones are: VINF_EM_RESCHEDULE,
1719 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1720 *
1721 * @param pVM The cross context VM structure.
1722 * @param pVCpu The cross context virtual CPU structure.
1723 * @param rc The current rc.
1724 *
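 * @remarks Call sketch, mirroring roughly what the outer loop in EMR3ExecuteVM
 *          does before picking an execution mode:
 * @code
 *      if (   VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
 *          || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
 *          rc = emR3ForcedActions(pVM, pVCpu, rc);
 * @endcode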
1725 */
1726int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1727{
1728 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1729#ifdef VBOX_STRICT
1730 int rcIrq = VINF_SUCCESS;
1731#endif
1732 int rc2;
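 /* Merges rc2 into rc: an error already in rc is never overridden, a rc2 of
    VINF_SUCCESS changes nothing, and otherwise the more important (numerically
    lower) status code wins. */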
1733#define UPDATE_RC() \
1734 do { \
1735 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1736 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1737 break; \
1738 if (!rc || rc2 < rc) \
1739 rc = rc2; \
1740 } while (0)
1741 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1742
1743 /*
1744 * Post execution chunk first.
1745 */
1746 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1747 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1748 {
1749 /*
1750 * EMT Rendezvous (must be serviced before termination).
1751 */
1752 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1753 {
1754 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1755 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1756 UPDATE_RC();
1757 /** @todo HACK ALERT! The following test is to make sure EM+TM
1758 * thinks the VM is stopped/reset before the next VM state change
1759 * is made. We need a better solution for this, or at least make it
1760 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1761 * VINF_EM_SUSPEND). */
1762 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1763 {
1764 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1765 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1766 return rc;
1767 }
1768 }
1769
1770 /*
1771 * State change request (cleared by vmR3SetStateLocked).
1772 */
1773 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1774 {
1775 VMSTATE enmState = VMR3GetState(pVM);
1776 switch (enmState)
1777 {
1778 case VMSTATE_FATAL_ERROR:
1779 case VMSTATE_FATAL_ERROR_LS:
1780 case VMSTATE_GURU_MEDITATION:
1781 case VMSTATE_GURU_MEDITATION_LS:
1782 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1783 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1784 return VINF_EM_SUSPEND;
1785
1786 case VMSTATE_DESTROYING:
1787 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1788 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1789 return VINF_EM_TERMINATE;
1790
1791 default:
1792 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1793 }
1794 }
1795
1796 /*
1797 * Debugger Facility polling.
1798 */
1799 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1800 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1801 {
1802 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1803 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1804 UPDATE_RC();
1805 }
1806
1807 /*
1808 * Postponed reset request.
1809 */
1810 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1811 {
1812 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1813 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1814 UPDATE_RC();
1815 }
1816
1817 /*
1818 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1819 */
1820 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1821 {
1822 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1823 UPDATE_RC();
1824 if (rc == VINF_EM_NO_MEMORY)
1825 return rc;
1826 }
1827
1828 /* check that we got them all */
1829 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1830 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_DBGF);
1831 }
1832
1833 /*
1834 * Normal priority then.
1835 * (Executed in no particular order.)
1836 */
1837 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1838 {
1839 /*
1840 * PDM Queues are pending.
1841 */
1842 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1843 PDMR3QueueFlushAll(pVM);
1844
1845 /*
1846 * PDM DMA transfers are pending.
1847 */
1848 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1849 PDMR3DmaRun(pVM);
1850
1851 /*
1852 * EMT Rendezvous (make sure they are handled before the requests).
1853 */
1854 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1855 {
1856 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1857 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1858 UPDATE_RC();
1859 /** @todo HACK ALERT! The following test is to make sure EM+TM
1860 * thinks the VM is stopped/reset before the next VM state change
1861 * is made. We need a better solution for this, or at least make it
1862 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1863 * VINF_EM_SUSPEND). */
1864 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1865 {
1866 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1867 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1868 return rc;
1869 }
1870 }
1871
1872 /*
1873 * Requests from other threads.
1874 */
1875 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1876 {
1877 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1878 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1879 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1880 {
1881 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1882 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1883 return rc2;
1884 }
1885 UPDATE_RC();
1886 /** @todo HACK ALERT! The following test is to make sure EM+TM
1887 * thinks the VM is stopped/reset before the next VM state change
1888 * is made. We need a better solution for this, or at least make it
1889 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1890 * VINF_EM_SUSPEND). */
1891 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1892 {
1893 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1894 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1895 return rc;
1896 }
1897 }
1898
1899#ifdef VBOX_WITH_REM
1900 /* Replay the handler notification changes. */
1901 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1902 {
1903 /* Try not to cause deadlocks. */
1904 if ( pVM->cCpus == 1
1905 || ( !PGMIsLockOwner(pVM)
1906 && !IOMIsLockWriteOwner(pVM))
1907 )
1908 {
1909 EMRemLock(pVM);
1910 REMR3ReplayHandlerNotifications(pVM);
1911 EMRemUnlock(pVM);
1912 }
1913 }
1914#endif
1915
1916 /* check that we got them all */
1917 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1918 }
1919
1920 /*
1921 * Normal priority then. (per-VCPU)
1922 * (Executed in no particular order.)
1923 */
1924 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1925 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1926 {
1927 /*
1928 * Requests from other threads.
1929 */
1930 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
1931 {
1932 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1933 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1934 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1935 {
1936 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1937 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1938 return rc2;
1939 }
1940 UPDATE_RC();
1941 /** @todo HACK ALERT! The following test is to make sure EM+TM
1942 * thinks the VM is stopped/reset before the next VM state change
1943 * is made. We need a better solution for this, or at least make it
1944 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1945 * VINF_EM_SUSPEND). */
1946 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1947 {
1948 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1949 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1950 return rc;
1951 }
1952 }
1953
1954 /* check that we got them all */
1955 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1956 }
1957
1958 /*
1959 * High priority pre execution chunk last.
1960 * (Executed in ascending priority order.)
1961 */
1962 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1963 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1964 {
1965 /*
1966 * Timers before interrupts.
1967 */
1968 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
1969 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1970 TMR3TimerQueuesDo(pVM);
1971
1972 /*
1973 * Pick up asynchronously posted interrupts into the APIC.
1974 */
1975 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1976 APICUpdatePendingInterrupts(pVCpu);
1977
1978 /*
1979 * The instruction following an emulated STI should *always* be executed!
1980 *
1981 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
1982 * the eip is the same as the inhibited instr address. Before we
1983 * are able to execute this instruction in raw mode (iret to
1984 * guest code) an external interrupt might force a world switch
1985 * again. Possibly allowing a guest interrupt to be dispatched
1986 * in the process. This could break the guest. Sounds very
1987 * unlikely, but such timing sensitive problem are not as rare as
1988 * you might think.
1989 */
1990 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1991 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1992 {
1993 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
1994 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1995 {
1996 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1997 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1998 }
1999 else
2000 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
2001 }
2002
2003 /** @todo SMIs. If we implement SMIs, this is where they will have to be
2004 * delivered. */
2005
2006#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2007 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_PREEMPT_TIMER))
2008 {
2009 /*
2010 * VMX Nested-guest APIC-write pending (can cause VM-exits).
2011 * Takes priority over even SMI and INIT signals.
2012 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
2013 */
2014 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
2015 {
2016 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
2017 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
2018 UPDATE_RC();
2019 }
2020
2021 /*
2022 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
2023 * Takes priority over "Traps on the previous instruction".
2024 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
2025 */
2026 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
2027 {
2028 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
2029 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
2030 UPDATE_RC();
2031 }
2032
2033 /*
2034 * VMX Nested-guest preemption timer VM-exit.
2035 * Takes priority over NMI-window VM-exits.
2036 */
2037 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
2038 {
2039 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
2040 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
2041 UPDATE_RC();
2042 }
2043 }
2044#endif
2045
2046 /*
2047 * Guest event injection.
2048 */
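 /* Set when an event is actually made pending for the guest; checked further
    down so that the wakeup from EMSTATE_HALTED isn't missed. */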
2049 bool fWakeupPending = false;
2050 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
2051 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
2052 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) /* Interrupt shadows block both NMIs and interrupts. */
2053 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
2054 {
2055 bool fInVmxNonRootMode;
2056 bool fInSvmHwvirtMode;
2057 bool const fInNestedGuest = CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx);
2058 if (fInNestedGuest)
2059 {
2060 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
2061 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
2062 }
2063 else
2064 {
2065 fInVmxNonRootMode = false;
2066 fInSvmHwvirtMode = false;
2067 }
2068
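 /* The global interrupt flag (GIF): only ever clear while a nested SVM
    hypervisor has masked interrupts with CLGI; otherwise events may be delivered. */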
2069 bool fGif = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
2070 if (fGif)
2071 {
2072#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2073 /*
2074 * VMX NMI-window VM-exit.
2075 * Takes priority over non-maskable interrupts (NMIs).
2076 * Interrupt shadows block NMI-window VM-exits.
2077 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
2078 *
2079 * See Intel spec. 25.2 "Other Causes Of VM Exits".
2080 * See Intel spec. 26.7.6 "NMI-Window Exiting".
2081 */
2082 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
2083 && !CPUMIsGuestVmxVirtNmiBlocking(pVCpu, &pVCpu->cpum.GstCtx))
2084 {
2085 Assert(CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
2086 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
2087 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
2088 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
2089 && rc2 != VINF_PGM_CHANGE_MODE
2090 && rc2 != VINF_VMX_VMEXIT
2091 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
2092 UPDATE_RC();
2093 }
2094 else
2095#endif
2096 /*
2097 * NMIs (take priority over external interrupts).
2098 */
2099 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
2100 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2101 {
2102#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2103 if ( fInVmxNonRootMode
2104 && CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
2105 {
2106 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
2107 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
2108 UPDATE_RC();
2109 }
2110 else
2111#endif
2112#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2113 if ( fInSvmHwvirtMode
2114 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
2115 {
2116 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
2117 AssertMsg( rc2 != VINF_PGM_CHANGE_MODE
2118 && rc2 != VINF_SVM_VMEXIT
2119 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
2120 UPDATE_RC();
2121 }
2122 else
2123#endif
2124 {
2125 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
2126 if (rc2 == VINF_SUCCESS)
2127 {
2128 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
2129 fWakeupPending = true;
2130 if (pVM->em.s.fIemExecutesAll)
2131 rc2 = VINF_EM_RESCHEDULE;
2132 else
2133 {
2134 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
2135 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
2136 : VINF_EM_RESCHEDULE_REM;
2137 }
2138 }
2139 UPDATE_RC();
2140 }
2141 }
2142#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2143 /*
2144 * VMX Interrupt-window VM-exits.
2145 * Takes priority over external interrupts.
2146 */
2147 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
2148 && CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
2149 {
2150 Assert(CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
2151 Assert(CPUMIsGuestVmxInterceptEvents(&pVCpu->cpum.GstCtx));
2152 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
2153 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
2154 && rc2 != VINF_PGM_CHANGE_MODE
2155 && rc2 != VINF_VMX_VMEXIT
2156 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
2157 UPDATE_RC();
2158 }
2159#endif
2160#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2161 /** @todo NSTSVM: Handle this for SVM here too later not when an interrupt is
2162 * actually pending like we currently do. */
2163#endif
2164 /*
2165 * External interrupts.
2166 */
2167 else
2168 {
2169 /*
2170 * VMX: virtual interrupts take priority over physical interrupts.
2171 * SVM: physical interrupts take priority over virtual interrupts.
2172 */
2173 if ( fInVmxNonRootMode
2174 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
2175 && CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
2176 {
2177 /** @todo NSTVMX: virtual-interrupt delivery. */
2178 rc2 = VINF_SUCCESS;
2179 }
2180 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2181 && CPUMIsGuestPhysIntrEnabled(pVCpu))
2182 {
2183 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2184 if (fInVmxNonRootMode)
2185 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
2186 else if (fInSvmHwvirtMode)
2187 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
2188 else
2189 rc2 = VINF_NO_CHANGE;
2190
2191 if (rc2 == VINF_NO_CHANGE)
2192 {
2193 bool fInjected = false;
2194 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2195 /** @todo this really isn't nice, should properly handle this */
2196 /* Note! This can still cause a VM-exit (on Intel). */
2197 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
2198 fWakeupPending = true;
2199 if ( pVM->em.s.fIemExecutesAll
2200 && ( rc2 == VINF_EM_RESCHEDULE_REM
2201 || rc2 == VINF_EM_RESCHEDULE_HM
2202 || rc2 == VINF_EM_RESCHEDULE_RAW))
2203 {
2204 rc2 = VINF_EM_RESCHEDULE;
2205 }
2206#ifdef VBOX_STRICT
2207 if (fInjected)
2208 rcIrq = rc2;
2209#endif
2210 }
2211 UPDATE_RC();
2212 }
2213 else if ( fInSvmHwvirtMode
2214 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
2215 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
2216 {
2217 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
2218 if (rc2 == VINF_NO_CHANGE)
2219 {
2220 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
2221 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
2222 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
2223 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
2224 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
2225 rc2 = VINF_EM_RESCHEDULE;
2226#ifdef VBOX_STRICT
2227 rcIrq = rc2;
2228#endif
2229 }
2230 UPDATE_RC();
2231 }
2232 }
2233 }
2234 }
2235
2236 /*
2237 * Allocate handy pages.
2238 */
2239 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2240 {
2241 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2242 UPDATE_RC();
2243 }
2244
2245 /*
2246 * Debugger Facility request.
2247 */
2248 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
2249 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
2250 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
2251 {
2252 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2253 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2254 UPDATE_RC();
2255 }
2256
2257 /*
2258 * EMT Rendezvous (must be serviced before termination).
2259 */
2260 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2261 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2262 {
2263 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2264 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2265 UPDATE_RC();
2266 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2267 * stopped/reset before the next VM state change is made. We need a better
2268 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2269 * && rc <= VINF_EM_SUSPEND). */
2270 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2271 {
2272 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2273 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2274 return rc;
2275 }
2276 }
2277
2278 /*
2279 * State change request (cleared by vmR3SetStateLocked).
2280 */
2281 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2282 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2283 {
2284 VMSTATE enmState = VMR3GetState(pVM);
2285 switch (enmState)
2286 {
2287 case VMSTATE_FATAL_ERROR:
2288 case VMSTATE_FATAL_ERROR_LS:
2289 case VMSTATE_GURU_MEDITATION:
2290 case VMSTATE_GURU_MEDITATION_LS:
2291 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2292 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2293 return VINF_EM_SUSPEND;
2294
2295 case VMSTATE_DESTROYING:
2296 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2297 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2298 return VINF_EM_TERMINATE;
2299
2300 default:
2301 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2302 }
2303 }
2304
2305 /*
2306 * Out of memory? Since most of our fellow high priority actions may cause us
2307 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2308 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2309 * than us since we can terminate without allocating more memory.
2310 */
2311 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2312 {
2313 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2314 UPDATE_RC();
2315 if (rc == VINF_EM_NO_MEMORY)
2316 return rc;
2317 }
2318
2319 /*
2320 * If the virtual sync clock is still stopped, make TM restart it.
2321 */
2322 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2323 TMR3VirtualSyncFF(pVM, pVCpu);
2324
2325#ifdef DEBUG
2326 /*
2327 * Debug, pause the VM.
2328 */
2329 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2330 {
2331 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2332 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2333 return VINF_EM_SUSPEND;
2334 }
2335#endif
2336
2337 /* check that we got them all */
2338 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2339 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW));
2340 }
2341
2342#undef UPDATE_RC
2343 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2344 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2345 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2346 return rc;
2347}
2348
2349
2350/**
2351 * Check if the preset execution time cap restricts guest execution scheduling.
2352 *
2353 * @returns true if allowed, false otherwise
2354 * @param pVM The cross context VM structure.
2355 * @param pVCpu The cross context virtual CPU structure.
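 *
 * @remarks Worked example (a sketch, assuming EM_TIME_SLICE is 100 ms): with
 *          uCpuExecutionCap set to 50, at most about 50 ms of combined
 *          kernel+user thread time may be spent executing guest code per slice.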
2356 */
2357bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2358{
2359 uint64_t u64UserTime, u64KernelTime;
2360
2361 if ( pVM->uCpuExecutionCap != 100
2362 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2363 {
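 /* Account the thread's kernel+user CPU time against the current time slice,
    starting a new slice once the previous one has elapsed. */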
2364 uint64_t u64TimeNow = RTTimeMilliTS();
2365 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2366 {
2367 /* New time slice. */
2368 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2369 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2370 pVCpu->em.s.u64TimeSliceExec = 0;
2371 }
2372 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2373
2374 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2375 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2376 return false;
2377 }
2378 return true;
2379}
2380
2381
2382/**
2383 * Execute VM.
2384 *
2385 * This function is the main loop of the VM. The emulation thread
2386 * calls this function when the VM has been successfully constructed
2387 * and we're ready to execute the VM.
2388 *
2389 * Returning from this function means that the VM is turned off or
2390 * suspended (state already saved) and deconstruction is next in line.
2391 *
2392 * All interaction from other threads is done using forced actions
2393 * and signalling of the wait object.
2394 *
2395 * @returns VBox status code, informational status codes may indicate failure.
2396 * @param pVM The cross context VM structure.
2397 * @param pVCpu The cross context virtual CPU structure.
2398 */
2399VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2400{
2401 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s)\n",
2402 pVM,
2403 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2404 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2405 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState) ));
2406 VM_ASSERT_EMT(pVM);
2407 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2408 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2409 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2410 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2411
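 /* Fatal errors deep in the execution engines longjmp back here; the error
    path at the bottom of this function then forces a guru meditation. */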
2412 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2413 if (rc == 0)
2414 {
2415 /*
2416 * Start the virtual time.
2417 */
2418 TMR3NotifyResume(pVM, pVCpu);
2419
2420 /*
2421 * The Outer Main Loop.
2422 */
2423 bool fFFDone = false;
2424
2425 /* Reschedule right away to start in the right state. */
2426 rc = VINF_SUCCESS;
2427
2428 /* If resuming after a pause or a state load, restore the previous
2429 state or else we'll start executing code. Else, just reschedule. */
2430 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2431 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2432 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2433 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2434 else
2435 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2436 pVCpu->em.s.cIemThenRemInstructions = 0;
2437 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2438
2439 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2440 for (;;)
2441 {
2442 /*
2443 * Before we can schedule anything (we're here because
2444 * scheduling is required) we must service any pending
2445 * forced actions to avoid any pending action causing
2446 * immediate rescheduling upon entering an inner loop
2447 *
2448 * Do forced actions.
2449 */
2450 if ( !fFFDone
2451 && RT_SUCCESS(rc)
2452 && rc != VINF_EM_TERMINATE
2453 && rc != VINF_EM_OFF
2454 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2455 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2456 {
2457 rc = emR3ForcedActions(pVM, pVCpu, rc);
2458 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2459 }
2460 else if (fFFDone)
2461 fFFDone = false;
2462
2463 /*
2464 * Now what to do?
2465 */
2466 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2467 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2468 switch (rc)
2469 {
2470 /*
2471 * Keep doing what we're currently doing.
2472 */
2473 case VINF_SUCCESS:
2474 break;
2475
2476 /*
2477 * Reschedule - to raw-mode execution.
2478 */
2479/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2480 case VINF_EM_RESCHEDULE_RAW:
2481 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2482 if (VM_IS_RAW_MODE_ENABLED(pVM))
2483 {
2484 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2485 pVCpu->em.s.enmState = EMSTATE_RAW;
2486 }
2487 else
2488 {
2489 AssertLogRelFailed();
2490 pVCpu->em.s.enmState = EMSTATE_NONE;
2491 }
2492 break;
2493
2494 /*
2495 * Reschedule - to HM or NEM.
2496 */
2497 case VINF_EM_RESCHEDULE_HM:
2498 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2499 if (VM_IS_HM_ENABLED(pVM))
2500 {
2501 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2502 pVCpu->em.s.enmState = EMSTATE_HM;
2503 }
2504 else if (VM_IS_NEM_ENABLED(pVM))
2505 {
2506 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2507 pVCpu->em.s.enmState = EMSTATE_NEM;
2508 }
2509 else
2510 {
2511 AssertLogRelFailed();
2512 pVCpu->em.s.enmState = EMSTATE_NONE;
2513 }
2514 break;
2515
2516 /*
2517 * Reschedule - to recompiled execution.
2518 */
2519 case VINF_EM_RESCHEDULE_REM:
2520 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2521 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2522 {
2523 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2524 enmOldState, EMSTATE_IEM_THEN_REM));
2525 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2526 {
2527 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2528 pVCpu->em.s.cIemThenRemInstructions = 0;
2529 }
2530 }
2531 else
2532 {
2533 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2534 pVCpu->em.s.enmState = EMSTATE_REM;
2535 }
2536 break;
2537
2538 /*
2539 * Resume.
2540 */
2541 case VINF_EM_RESUME:
2542 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2543 /* Don't reschedule in the halted or wait for SIPI case. */
2544 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2545 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2546 {
2547 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2548 break;
2549 }
2550 /* fall through and get scheduled. */
2551 RT_FALL_THRU();
2552
2553 /*
2554 * Reschedule.
2555 */
2556 case VINF_EM_RESCHEDULE:
2557 {
2558 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2559 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2560 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2561 pVCpu->em.s.cIemThenRemInstructions = 0;
2562 pVCpu->em.s.enmState = enmState;
2563 break;
2564 }
2565
2566 /*
2567 * Halted.
2568 */
2569 case VINF_EM_HALT:
2570 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2571 pVCpu->em.s.enmState = EMSTATE_HALTED;
2572 break;
2573
2574 /*
2575 * Switch to the wait for SIPI state (application processor only)
2576 */
2577 case VINF_EM_WAIT_SIPI:
2578 Assert(pVCpu->idCpu != 0);
2579 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2580 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2581 break;
2582
2583
2584 /*
2585 * Suspend.
2586 */
2587 case VINF_EM_SUSPEND:
2588 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2589 Assert(enmOldState != EMSTATE_SUSPENDED);
2590 pVCpu->em.s.enmPrevState = enmOldState;
2591 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2592 break;
2593
2594 /*
2595 * Reset.
2596 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2597 */
2598 case VINF_EM_RESET:
2599 {
2600 if (pVCpu->idCpu == 0)
2601 {
2602 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2603 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2604 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2605 pVCpu->em.s.cIemThenRemInstructions = 0;
2606 pVCpu->em.s.enmState = enmState;
2607 }
2608 else
2609 {
2610 /* All other VCPUs go into the wait for SIPI state. */
2611 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2612 }
2613 break;
2614 }
2615
2616 /*
2617 * Power Off.
2618 */
2619 case VINF_EM_OFF:
2620 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2621 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2622 TMR3NotifySuspend(pVM, pVCpu);
2623 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2624 return rc;
2625
2626 /*
2627 * Terminate the VM.
2628 */
2629 case VINF_EM_TERMINATE:
2630 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2631 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2632 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2633 TMR3NotifySuspend(pVM, pVCpu);
2634 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2635 return rc;
2636
2637
2638 /*
2639 * Out of memory, suspend the VM and stuff.
2640 */
2641 case VINF_EM_NO_MEMORY:
2642 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2643 Assert(enmOldState != EMSTATE_SUSPENDED);
2644 pVCpu->em.s.enmPrevState = enmOldState;
2645 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2646 TMR3NotifySuspend(pVM, pVCpu);
2647 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2648
2649 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2650 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2651 if (rc != VINF_EM_SUSPEND)
2652 {
2653 if (RT_SUCCESS_NP(rc))
2654 {
2655 AssertLogRelMsgFailed(("%Rrc\n", rc));
2656 rc = VERR_EM_INTERNAL_ERROR;
2657 }
2658 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2659 }
2660 return rc;
2661
2662 /*
2663 * Guest debug events.
2664 */
2665 case VINF_EM_DBG_STEPPED:
2666 case VINF_EM_DBG_STOP:
2667 case VINF_EM_DBG_EVENT:
2668 case VINF_EM_DBG_BREAKPOINT:
2669 case VINF_EM_DBG_STEP:
2670 if (enmOldState == EMSTATE_RAW)
2671 {
2672 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2673 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2674 }
2675 else if (enmOldState == EMSTATE_HM)
2676 {
2677 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2678 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2679 }
2680 else if (enmOldState == EMSTATE_NEM)
2681 {
2682 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2683 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2684 }
2685 else if (enmOldState == EMSTATE_REM)
2686 {
2687 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2688 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2689 }
2690 else
2691 {
2692 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2693 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2694 }
2695 break;
2696
2697 /*
2698 * Hypervisor debug events.
2699 */
2700 case VINF_EM_DBG_HYPER_STEPPED:
2701 case VINF_EM_DBG_HYPER_BREAKPOINT:
2702 case VINF_EM_DBG_HYPER_ASSERTION:
2703 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2704 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2705 break;
2706
2707 /*
2708 * Triple fault.
2709 */
2710 case VINF_EM_TRIPLE_FAULT:
2711 if (!pVM->em.s.fGuruOnTripleFault)
2712 {
2713 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2714 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2715 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2716 continue;
2717 }
2718 /* Else fall through and trigger a guru. */
2719 RT_FALL_THRU();
2720
2721 case VERR_VMM_RING0_ASSERTION:
2722 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2723 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2724 break;
2725
2726 /*
2727 * Any error code showing up here other than the ones we
2728 * know and process above are considered to be FATAL.
2729 *
2730 * Unknown warnings and informational status codes are also
2731 * included in this.
2732 */
2733 default:
2734 if (RT_SUCCESS_NP(rc))
2735 {
2736 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2737 rc = VERR_EM_INTERNAL_ERROR;
2738 }
2739 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2740 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2741 break;
2742 }
2743
2744 /*
2745 * Act on state transition.
2746 */
2747 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2748 if (enmOldState != enmNewState)
2749 {
2750 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2751
2752 /* Clear MWait flags and the unhalt FF. */
2753 if ( enmOldState == EMSTATE_HALTED
2754 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2755 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2756 && ( enmNewState == EMSTATE_RAW
2757 || enmNewState == EMSTATE_HM
2758 || enmNewState == EMSTATE_NEM
2759 || enmNewState == EMSTATE_REM
2760 || enmNewState == EMSTATE_IEM_THEN_REM
2761 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2762 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2763 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2764 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2765 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2766 {
2767 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2768 {
2769 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2770 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2771 }
2772 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2773 {
2774 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2775 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2776 }
2777 }
2778 }
2779 else
2780 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2781
2782 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2783 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2784
2785 /*
2786 * Act on the new state.
2787 */
2788 switch (enmNewState)
2789 {
2790 /*
2791 * Execute raw.
2792 */
2793 case EMSTATE_RAW:
2794 AssertLogRelMsgFailed(("%Rrc\n", rc));
2795 rc = VERR_EM_INTERNAL_ERROR;
2796 break;
2797
2798 /*
2799 * Execute hardware accelerated raw.
2800 */
2801 case EMSTATE_HM:
2802 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2803 break;
2804
2805 /*
2806 * Execute using the native execution manager (NEM).
2807 */
2808 case EMSTATE_NEM:
2809 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2810 break;
2811
2812 /*
2813 * Execute recompiled.
2814 */
2815 case EMSTATE_REM:
2816 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2817 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2818 break;
2819
2820 /*
2821 * Execute in the interpreter.
2822 */
2823 case EMSTATE_IEM:
2824 {
2825 uint32_t cInstructions = 0;
2826#if 0 /* For testing purposes. */
2827 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2828 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2829 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2830 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2831 rc = VINF_SUCCESS;
2832 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2833#endif
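 /* Run a batch of instructions in the interpreter, periodically checking for
    pending work (see the cPollRate argument). */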
2834 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2835 if (pVM->em.s.fIemExecutesAll)
2836 {
2837 Assert(rc != VINF_EM_RESCHEDULE_REM);
2838 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2839 Assert(rc != VINF_EM_RESCHEDULE_HM);
2840#ifdef VBOX_HIGH_RES_TIMERS_HACK
2841 if (cInstructions < 2048)
2842 TMTimerPollVoid(pVM, pVCpu);
2843#endif
2844 }
2845 fFFDone = false;
2846 break;
2847 }
2848
2849 /*
2850 * Execute in IEM, hoping we can quickly switch back to HM
2851 * or RAW execution. If our hopes fail, we go to REM.
2852 */
2853 case EMSTATE_IEM_THEN_REM:
2854 {
2855 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2856 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2857 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2858 break;
2859 }
2860
2861 /*
2862 * Application processor execution halted until SIPI.
2863 */
2864 case EMSTATE_WAIT_SIPI:
2865 /* no break */
2866 /*
2867 * hlt - execution halted until interrupt.
2868 */
2869 case EMSTATE_HALTED:
2870 {
2871 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2872 /* If HM (or someone else) stores a pending interrupt in
2873 TRPM, it must be dispatched ASAP without any halting.
2874 Anything pending in TRPM has been accepted and the CPU
2875 should already be in the right state to receive it. */
2876 if (TRPMHasTrap(pVCpu))
2877 rc = VINF_EM_RESCHEDULE;
2878 /* MWAIT has a special extension where it's woken up when
2879 an interrupt is pending even when IF=0. */
2880 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2881 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2882 {
2883 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2884 if (rc == VINF_SUCCESS)
2885 {
2886 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2887 APICUpdatePendingInterrupts(pVCpu);
2888
2889 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2890 | VMCPU_FF_INTERRUPT_NESTED_GUEST
2891 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2892 {
2893 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2894 rc = VINF_EM_RESCHEDULE;
2895 }
2896 }
2897 }
2898 else
2899 {
2900 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2901 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2902 check VMCPU_FF_UPDATE_APIC here. */
2903 if ( rc == VINF_SUCCESS
2904 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2905 {
2906 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2907 rc = VINF_EM_RESCHEDULE;
2908 }
2909 }
2910
2911 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2912 break;
2913 }
2914
2915 /*
2916 * Suspended - return to VM.cpp.
2917 */
2918 case EMSTATE_SUSPENDED:
2919 TMR3NotifySuspend(pVM, pVCpu);
2920 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2921 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2922 return VINF_EM_SUSPEND;
2923
2924 /*
2925 * Debugging in the guest.
2926 */
2927 case EMSTATE_DEBUG_GUEST_RAW:
2928 case EMSTATE_DEBUG_GUEST_HM:
2929 case EMSTATE_DEBUG_GUEST_NEM:
2930 case EMSTATE_DEBUG_GUEST_IEM:
2931 case EMSTATE_DEBUG_GUEST_REM:
2932 TMR3NotifySuspend(pVM, pVCpu);
2933 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2934 TMR3NotifyResume(pVM, pVCpu);
2935 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2936 break;
2937
2938 /*
2939 * Debugging in the hypervisor.
2940 */
2941 case EMSTATE_DEBUG_HYPER:
2942 {
2943 TMR3NotifySuspend(pVM, pVCpu);
2944 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2945
2946 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2947 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2948 if (rc != VINF_SUCCESS)
2949 {
2950 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2951 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2952 else
2953 {
2954 /* switch to guru meditation mode */
2955 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2956 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2957 VMMR3FatalDump(pVM, pVCpu, rc);
2958 }
2959 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2960 return rc;
2961 }
2962
2963 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2964 TMR3NotifyResume(pVM, pVCpu);
2965 break;
2966 }
2967
2968 /*
2969 * Guru meditation takes place in the debugger.
2970 */
2971 case EMSTATE_GURU_MEDITATION:
2972 {
2973 TMR3NotifySuspend(pVM, pVCpu);
2974 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2975 VMMR3FatalDump(pVM, pVCpu, rc);
2976 emR3Debug(pVM, pVCpu, rc);
2977 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2978 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2979 return rc;
2980 }
2981
2982 /*
2983 * The states we don't expect here.
2984 */
2985 case EMSTATE_NONE:
2986 case EMSTATE_TERMINATING:
2987 default:
2988 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2989 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2990 TMR3NotifySuspend(pVM, pVCpu);
2991 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2992 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2993 return VERR_EM_INTERNAL_ERROR;
2994 }
2995 } /* The Outer Main Loop */
2996 }
2997 else
2998 {
2999 /*
3000 * Fatal error.
3001 */
3002 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
3003 TMR3NotifySuspend(pVM, pVCpu);
3004 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3005 VMMR3FatalDump(pVM, pVCpu, rc);
3006 emR3Debug(pVM, pVCpu, rc);
3007 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3008 /** @todo change the VM state! */
3009 return rc;
3010 }
3011
3012 /* not reached */
3013}
3014