VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@79843

Last change on this file since 79843 was 79843, checked in by vboxsync, 6 years ago

VMM/EM: Nested VMX: bugref:9180 Nested SVM: bugref:7243 Fix missing CPUMIsGuestPhysIntrEnabled for the VMX case and re-factored the code to be far more readable.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 127.1 KB
1/* $Id: EM.cpp 79843 2019-07-17 17:42:11Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
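/*
 * Illustrative sketch only (an assumption for explanation, not the actual code of
 * EMR3ExecuteVM): how the outer loop relates to the per-mode inner loops named above.
 *
 *     for (;;)
 *     {
 *         EMSTATE enmState = emR3Reschedule(pVM, pVCpu);  // pick RAW / HM / NEM / IEM / REM
 *         rc = <inner loop for enmState>;                 // e.g. emR3HmExecute / emR3RemExecute
 *         rc = emR3ForcedActions(pVM, pVCpu, rc);         // service pending forced actions
 *     }
 */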
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/vmm.h>
42#include <VBox/vmm/patm.h>
43#include <VBox/vmm/csam.h>
44#include <VBox/vmm/selm.h>
45#include <VBox/vmm/trpm.h>
46#include <VBox/vmm/iem.h>
47#include <VBox/vmm/nem.h>
48#include <VBox/vmm/iom.h>
49#include <VBox/vmm/dbgf.h>
50#include <VBox/vmm/pgm.h>
51#ifdef VBOX_WITH_REM
52# include <VBox/vmm/rem.h>
53#endif
54#include <VBox/vmm/apic.h>
55#include <VBox/vmm/tm.h>
56#include <VBox/vmm/mm.h>
57#include <VBox/vmm/ssm.h>
58#include <VBox/vmm/pdmapi.h>
59#include <VBox/vmm/pdmcritsect.h>
60#include <VBox/vmm/pdmqueue.h>
61#include <VBox/vmm/hm.h>
62#include <VBox/vmm/patm.h>
63#include "EMInternal.h"
64#include <VBox/vmm/vm.h>
65#include <VBox/vmm/uvm.h>
66#include <VBox/vmm/cpumdis.h>
67#include <VBox/dis.h>
68#include <VBox/disopcode.h>
69#include <VBox/err.h>
70#include "VMMTracing.h"
71
72#include <iprt/asm.h>
73#include <iprt/string.h>
74#include <iprt/stream.h>
75#include <iprt/thread.h>
76
77
78/*********************************************************************************************************************************
79* Internal Functions *
80*********************************************************************************************************************************/
81static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
82static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
83#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
84static const char *emR3GetStateName(EMSTATE enmState);
85#endif
86static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
87#if defined(VBOX_WITH_REM) || defined(DEBUG)
88static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
89#endif
90static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
91
92
93/**
94 * Initializes the EM.
95 *
96 * @returns VBox status code.
97 * @param pVM The cross context VM structure.
98 */
99VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
100{
101 LogFlow(("EMR3Init\n"));
102 /*
103 * Assert alignment and sizes.
104 */
105 AssertCompileMemberAlignment(VM, em.s, 32);
106 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
107 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
108
109 /*
110 * Init the structure.
111 */
112 pVM->em.s.offVM = RT_UOFFSETOF(VM, em.s);
113 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
114 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
115
116 bool fEnabled;
117 int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
118 AssertLogRelRCReturn(rc, rc);
119 pVM->fRecompileUser = !fEnabled;
120
121 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
122 AssertLogRelRCReturn(rc, rc);
123 pVM->fRecompileSupervisor = !fEnabled;
124
125#ifdef VBOX_WITH_RAW_RING1
126 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
127 AssertLogRelRCReturn(rc, rc);
128#else
129 pVM->fRawRing1Enabled = false; /* Disabled by default. */
130#endif
131
132 rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
133 AssertLogRelRCReturn(rc, rc);
134
135 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
136 AssertLogRelRCReturn(rc, rc);
137 pVM->em.s.fGuruOnTripleFault = !fEnabled;
138 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
139 {
140 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
141 pVM->em.s.fGuruOnTripleFault = true;
142 }
143
144 LogRel(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
145 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
146
147 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
 148 * Whether to try to correlate exit history in any context, detect hot spots and
 149 * try to optimize these using IEM if there are other exits close by. This
150 * overrides the context specific settings. */
151 bool fExitOptimizationEnabled = true;
152 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
153 AssertLogRelRCReturn(rc, rc);
154
155 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
156 * Whether to optimize exits in ring-0. Setting this to false will also disable
157 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
158 * capabilities of the host kernel, this optimization may be unavailable. */
159 bool fExitOptimizationEnabledR0 = true;
160 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
161 AssertLogRelRCReturn(rc, rc);
162 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
163
164 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
 165 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
166 * hooks are in effect). */
167 /** @todo change the default to true here */
168 bool fExitOptimizationEnabledR0PreemptDisabled = true;
169 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
170 AssertLogRelRCReturn(rc, rc);
171 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
172
173 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
 174 * Maximum number of instructions to let EMHistoryExec execute in one go. */
175 uint16_t cHistoryExecMaxInstructions = 8192;
176 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
177 AssertLogRelRCReturn(rc, rc);
178 if (cHistoryExecMaxInstructions < 16)
179 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
180
181 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
 182 * Maximum number of instructions between exits during probing. */
183 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
184#ifdef RT_OS_WINDOWS
185 if (VM_IS_NEM_ENABLED(pVM))
186 cHistoryProbeMaxInstructionsWithoutExit = 32;
187#endif
188 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
189 cHistoryProbeMaxInstructionsWithoutExit);
190 AssertLogRelRCReturn(rc, rc);
191 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
192 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
 193 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 2");
194
195 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
196 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
197 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
198 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
199 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
200 cHistoryProbeMinInstructions);
201 AssertLogRelRCReturn(rc, rc);
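    /* Note (illustrative): the /EM/* keys queried above live in the VM's CFGM tree and are
       typically overridden from the host side via extradata, e.g. something along the lines
       of "VBoxManage setextradata <vm> VBoxInternal/EM/IemExecutesAll 1" (the VBoxInternal/
       prefix and exact syntax are assumptions). When a key is absent, the CFGMR3Query*Def
       calls above simply fall back to the hard-coded defaults. */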
202
203 for (VMCPUID i = 0; i < pVM->cCpus; i++)
204 {
205 pVM->aCpus[i].em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
206 pVM->aCpus[i].em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
207 pVM->aCpus[i].em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
208
209 pVM->aCpus[i].em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
210 pVM->aCpus[i].em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
211 pVM->aCpus[i].em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
212 }
213
214#ifdef VBOX_WITH_REM
215 /*
216 * Initialize the REM critical section.
217 */
218 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
219 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
220 AssertRCReturn(rc, rc);
221#endif
222
223 /*
224 * Saved state.
225 */
226 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
227 NULL, NULL, NULL,
228 NULL, emR3Save, NULL,
229 NULL, emR3Load, NULL);
230 if (RT_FAILURE(rc))
231 return rc;
232
233 for (VMCPUID i = 0; i < pVM->cCpus; i++)
234 {
235 PVMCPU pVCpu = &pVM->aCpus[i];
236
237 pVCpu->em.s.enmState = i == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
238 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
239 pVCpu->em.s.fForceRAW = false;
240 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
241 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
242
243#ifdef VBOX_WITH_RAW_MODE
244 if (VM_IS_RAW_MODE_ENABLED(pVM))
245 {
246 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
247 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
248 }
249#endif
250
251# define EM_REG_COUNTER(a, b, c) \
252 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
253 AssertRC(rc);
254
255# define EM_REG_COUNTER_USED(a, b, c) \
256 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
257 AssertRC(rc);
258
259# define EM_REG_PROFILE(a, b, c) \
260 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
261 AssertRC(rc);
262
263# define EM_REG_PROFILE_ADV(a, b, c) \
264 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
265 AssertRC(rc);
266
267 /*
268 * Statistics.
269 */
270#ifdef VBOX_WITH_STATISTICS
271 PEMSTATS pStats;
272 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
273 if (RT_FAILURE(rc))
274 return rc;
275
276 pVCpu->em.s.pStatsR3 = pStats;
277 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
278 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
279
280# if 1 /* rawmode only? */
281 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
 282 EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
283 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
 284 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
285 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
286 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
287 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
288 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
289 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
290 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
291 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
292 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
293 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
294 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
295 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
296 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
297 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
298 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
299 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
300 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
301 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
302 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
303 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
304 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
305 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
306 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
307 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
308#endif
309 pVCpu->em.s.pCliStatTree = 0;
310
311 /* these should be considered for release statistics. */
312 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
313 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
314 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%d/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
315 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%d/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
 316 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%d/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
317 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
318 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
319 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%d/EM/NEMEnter", "Profiling NEM entry overhead.");
320#endif /* VBOX_WITH_STATISTICS */
321 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%d/EM/NEMExec", "Profiling NEM execution.");
 322 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%d/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
323#ifdef VBOX_WITH_STATISTICS
324 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
325 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
326 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
327 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
328 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
329 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
330#endif /* VBOX_WITH_STATISTICS */
331
332 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
333 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
334 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
335 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
336 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
337
338 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
339
340 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
341 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", i);
342 AssertRC(rc);
343
344 /* History record statistics */
345 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
346 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", i);
347 AssertRC(rc);
348
349 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
350 {
351 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
352 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", i, iStep);
353 AssertRC(rc);
354 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
355 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", i, iStep);
356 AssertRC(rc);
 357 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
 358 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", i, iStep);
359 AssertRC(rc);
360 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
361 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", i, iStep);
362 AssertRC(rc);
363 }
364
365 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%d/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
366 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%d/ExitOpt/ExecSavedExit", "Net number of saved exits.");
367 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%d/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
368 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%d/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
369 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%d/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
370 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%d/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
371 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%d/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
372 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%d/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
373 }
374
375 emR3InitDbg(pVM);
376 return VINF_SUCCESS;
377}
378
379
380/**
381 * Called when a VM initialization stage is completed.
382 *
383 * @returns VBox status code.
384 * @param pVM The cross context VM structure.
385 * @param enmWhat The initialization state that was completed.
386 */
387VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
388{
389 if (enmWhat == VMINITCOMPLETED_RING0)
390 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
391 pVM->aCpus[0].em.s.fExitOptimizationEnabled, pVM->aCpus[0].em.s.fExitOptimizationEnabledR0,
392 pVM->aCpus[0].em.s.fExitOptimizationEnabledR0PreemptDisabled));
393 return VINF_SUCCESS;
394}
395
396
397/**
398 * Applies relocations to data and code managed by this
399 * component. This function will be called at init and
 400 * whenever the VMM needs to relocate itself inside the GC.
401 *
402 * @param pVM The cross context VM structure.
403 */
404VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
405{
406 LogFlow(("EMR3Relocate\n"));
407 for (VMCPUID i = 0; i < pVM->cCpus; i++)
408 {
409 PVMCPU pVCpu = &pVM->aCpus[i];
410 if (pVCpu->em.s.pStatsR3)
411 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
412 }
413}
414
415
416/**
417 * Reset the EM state for a CPU.
418 *
419 * Called by EMR3Reset and hot plugging.
420 *
421 * @param pVCpu The cross context virtual CPU structure.
422 */
423VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
424{
425 /* Reset scheduling state. */
426 pVCpu->em.s.fForceRAW = false;
427 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
428
429 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
430 out of the HALTED state here so that enmPrevState doesn't end up as
431 HALTED when EMR3Execute returns. */
432 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
433 {
434 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
435 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
436 }
437}
438
439
440/**
441 * Reset notification.
442 *
443 * @param pVM The cross context VM structure.
444 */
445VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
446{
447 Log(("EMR3Reset: \n"));
448 for (VMCPUID i = 0; i < pVM->cCpus; i++)
449 EMR3ResetCpu(&pVM->aCpus[i]);
450}
451
452
453/**
454 * Terminates the EM.
455 *
 456 * Termination means cleaning up and freeing all resources;
 457 * the VM itself is at this point powered off or suspended.
458 *
459 * @returns VBox status code.
460 * @param pVM The cross context VM structure.
461 */
462VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
463{
464 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
465
466#ifdef VBOX_WITH_REM
467 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
468#else
469 RT_NOREF(pVM);
470#endif
471 return VINF_SUCCESS;
472}
473
474
475/**
476 * Execute state save operation.
477 *
478 * @returns VBox status code.
479 * @param pVM The cross context VM structure.
480 * @param pSSM SSM operation handle.
481 */
482static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
483{
484 for (VMCPUID i = 0; i < pVM->cCpus; i++)
485 {
486 PVMCPU pVCpu = &pVM->aCpus[i];
487
488 SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
489
490 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
491 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
492 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
493
494 /* Save mwait state. */
495 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
496 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
497 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
498 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
499 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
500 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
501 AssertRCReturn(rc, rc);
502 }
503 return VINF_SUCCESS;
504}
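/* Note: emR3Load() below must consume these fields in exactly this order; the MWAIT
   block is only read back for saved states newer than EM_SAVED_STATE_VERSION_PRE_MWAIT
   (see the version checks in emR3Load). */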
505
506
507/**
508 * Execute state load operation.
509 *
510 * @returns VBox status code.
511 * @param pVM The cross context VM structure.
512 * @param pSSM SSM operation handle.
513 * @param uVersion Data layout version.
514 * @param uPass The data pass.
515 */
516static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
517{
518 /*
519 * Validate version.
520 */
521 if ( uVersion > EM_SAVED_STATE_VERSION
522 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
523 {
524 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
525 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
526 }
527 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
528
529 /*
530 * Load the saved state.
531 */
532 for (VMCPUID i = 0; i < pVM->cCpus; i++)
533 {
534 PVMCPU pVCpu = &pVM->aCpus[i];
535
536 int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
537 if (RT_FAILURE(rc))
538 pVCpu->em.s.fForceRAW = false;
539 AssertRCReturn(rc, rc);
540
541 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
542 {
543 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
544 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
545 AssertRCReturn(rc, rc);
546 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
547
548 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
549 }
550 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
551 {
552 /* Load mwait state. */
553 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
554 AssertRCReturn(rc, rc);
555 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
556 AssertRCReturn(rc, rc);
557 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
558 AssertRCReturn(rc, rc);
559 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
560 AssertRCReturn(rc, rc);
561 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
562 AssertRCReturn(rc, rc);
563 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
564 AssertRCReturn(rc, rc);
565 }
566
567 Assert(!pVCpu->em.s.pCliStatTree);
568 }
569 return VINF_SUCCESS;
570}
571
572
573/**
574 * Argument packet for emR3SetExecutionPolicy.
575 */
576struct EMR3SETEXECPOLICYARGS
577{
578 EMEXECPOLICY enmPolicy;
579 bool fEnforce;
580};
581
582
583/**
584 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
585 */
586static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
587{
588 /*
589 * Only the first CPU changes the variables.
590 */
591 if (pVCpu->idCpu == 0)
592 {
593 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
594 switch (pArgs->enmPolicy)
595 {
596 case EMEXECPOLICY_RECOMPILE_RING0:
597 pVM->fRecompileSupervisor = pArgs->fEnforce;
598 break;
599 case EMEXECPOLICY_RECOMPILE_RING3:
600 pVM->fRecompileUser = pArgs->fEnforce;
601 break;
602 case EMEXECPOLICY_IEM_ALL:
603 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
604 break;
605 default:
606 AssertFailedReturn(VERR_INVALID_PARAMETER);
607 }
608 Log(("EM: Set execution policy (fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool)\n",
609 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
610 }
611
612 /*
613 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
614 */
615 return pVCpu->em.s.enmState == EMSTATE_RAW
616 || pVCpu->em.s.enmState == EMSTATE_HM
617 || pVCpu->em.s.enmState == EMSTATE_NEM
618 || pVCpu->em.s.enmState == EMSTATE_IEM
619 || pVCpu->em.s.enmState == EMSTATE_REM
620 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
621 ? VINF_EM_RESCHEDULE
622 : VINF_SUCCESS;
623}
624
625
626/**
627 * Changes an execution scheduling policy parameter.
628 *
629 * This is used to enable or disable raw-mode / hardware-virtualization
630 * execution of user and supervisor code.
631 *
632 * @returns VINF_SUCCESS on success.
 633 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 634 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
635 *
636 * @param pUVM The user mode VM handle.
637 * @param enmPolicy The scheduling policy to change.
638 * @param fEnforce Whether to enforce the policy or not.
639 */
640VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
641{
642 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
643 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
644 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
645
646 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
647 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
648}
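/*
 * Illustrative usage sketch (assumed caller context, not part of this file): a ring-3
 * client holding a PUVM handle could force all guest code through IEM like this:
 *
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *     AssertRC(rc);
 */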
649
650
651/**
652 * Queries an execution scheduling policy parameter.
653 *
654 * @returns VBox status code
655 * @param pUVM The user mode VM handle.
656 * @param enmPolicy The scheduling policy to query.
657 * @param pfEnforced Where to return the current value.
658 */
659VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
660{
661 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
662 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
663 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
664 PVM pVM = pUVM->pVM;
665 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
666
667 /* No need to bother EMTs with a query. */
668 switch (enmPolicy)
669 {
670 case EMEXECPOLICY_RECOMPILE_RING0:
671 *pfEnforced = pVM->fRecompileSupervisor;
672 break;
673 case EMEXECPOLICY_RECOMPILE_RING3:
674 *pfEnforced = pVM->fRecompileUser;
675 break;
676 case EMEXECPOLICY_IEM_ALL:
677 *pfEnforced = pVM->em.s.fIemExecutesAll;
678 break;
679 default:
680 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
681 }
682
683 return VINF_SUCCESS;
684}
685
686
687/**
688 * Queries the main execution engine of the VM.
689 *
690 * @returns VBox status code
691 * @param pUVM The user mode VM handle.
692 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
693 */
694VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
695{
696 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
697 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
698
699 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
700 PVM pVM = pUVM->pVM;
701 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
702
703 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
704 return VINF_SUCCESS;
705}
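/*
 * Illustrative usage sketch (VM_EXEC_ENGINE_HW_VIRT and friends are assumed constant
 * names from <VBox/vmm/vm.h>; VM_EXEC_ENGINE_NOT_SET is used above):
 *
 *     uint8_t bEngine = VM_EXEC_ENGINE_NOT_SET;
 *     if (RT_SUCCESS(EMR3QueryMainExecutionEngine(pUVM, &bEngine)))
 *         LogRel(("Main execution engine: %u\n", bEngine));
 */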
706
707
708/**
709 * Raise a fatal error.
710 *
711 * Safely terminate the VM with full state report and stuff. This function
712 * will naturally never return.
713 *
714 * @param pVCpu The cross context virtual CPU structure.
715 * @param rc VBox status code.
716 */
717VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
718{
719 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
720 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
721}
722
723
724#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
725/**
726 * Gets the EM state name.
727 *
 728 * @returns Pointer to the read-only state name.
729 * @param enmState The state.
730 */
731static const char *emR3GetStateName(EMSTATE enmState)
732{
733 switch (enmState)
734 {
735 case EMSTATE_NONE: return "EMSTATE_NONE";
736 case EMSTATE_RAW: return "EMSTATE_RAW";
737 case EMSTATE_HM: return "EMSTATE_HM";
738 case EMSTATE_IEM: return "EMSTATE_IEM";
739 case EMSTATE_REM: return "EMSTATE_REM";
740 case EMSTATE_HALTED: return "EMSTATE_HALTED";
741 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
742 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
743 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
744 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
745 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
746 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
747 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
748 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
749 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
750 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
751 case EMSTATE_NEM: return "EMSTATE_NEM";
752 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
753 default: return "Unknown!";
754 }
755}
756#endif /* LOG_ENABLED || VBOX_STRICT */
757
758
759/**
760 * Handle pending ring-3 I/O port write.
761 *
762 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
763 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
764 *
765 * @returns Strict VBox status code.
766 * @param pVM The cross context VM structure.
767 * @param pVCpu The cross context virtual CPU structure.
768 */
769VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
770{
771 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
772
773 /* Get and clear the pending data. */
774 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
775 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
776 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
777 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
778 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
779
780 /* Assert sanity. */
781 switch (cbValue)
782 {
 783 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
 784 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
785 case 4: break;
786 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
787 }
788 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
789
790 /* Do the work.*/
791 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
792 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
793 if (IOM_SUCCESS(rcStrict))
794 {
795 pVCpu->cpum.GstCtx.rip += cbInstr;
796 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
797 }
798 return rcStrict;
799}
800
801
802/**
 803 * Handle pending ring-3 I/O port read.
 804 *
 805 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
806 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
807 *
808 * @returns Strict VBox status code.
809 * @param pVM The cross context VM structure.
810 * @param pVCpu The cross context virtual CPU structure.
811 */
812VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
813{
814 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
815
816 /* Get and clear the pending data. */
817 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
818 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
819 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
820 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
821
822 /* Assert sanity. */
823 switch (cbValue)
824 {
825 case 1: break;
826 case 2: break;
827 case 4: break;
828 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
829 }
830 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
831 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
832
833 /* Do the work.*/
834 uint32_t uValue = 0;
835 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
836 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
837 if (IOM_SUCCESS(rcStrict))
838 {
839 if (cbValue == 4)
840 pVCpu->cpum.GstCtx.rax = uValue;
841 else if (cbValue == 2)
842 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
843 else
844 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
845 pVCpu->cpum.GstCtx.rip += cbInstr;
846 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
847 }
848 return rcStrict;
849}
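/* Note: the two helpers above complete I/O port accesses that ring-0/raw-mode code
   deferred to ring-3 by recording them in pVCpu->em.s.PendingIoPortAccess and returning
   VINF_EM_PENDING_R3_IOPORT_WRITE / VINF_EM_PENDING_R3_IOPORT_READ; clearing cbValue
   marks the pending access as consumed. */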
850
851
852/**
853 * Debug loop.
854 *
855 * @returns VBox status code for EM.
856 * @param pVM The cross context VM structure.
857 * @param pVCpu The cross context virtual CPU structure.
858 * @param rc Current EM VBox status code.
859 */
860static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
861{
862 for (;;)
863 {
864 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
865 const VBOXSTRICTRC rcLast = rc;
866
867 /*
868 * Debug related RC.
869 */
870 switch (VBOXSTRICTRC_VAL(rc))
871 {
872 /*
873 * Single step an instruction.
874 */
875 case VINF_EM_DBG_STEP:
876 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
877 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
878 || pVCpu->em.s.fForceRAW /* paranoia */)
879#ifdef VBOX_WITH_RAW_MODE
880 rc = emR3RawStep(pVM, pVCpu);
881#else
882 AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
883#endif
884 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
885 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
886 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
887 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
888#ifdef VBOX_WITH_REM
889 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
890 rc = emR3RemStep(pVM, pVCpu);
891#endif
892 else
893 {
894 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
895 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
896 rc = VINF_EM_DBG_STEPPED;
897 }
898 break;
899
900 /*
901 * Simple events: stepped, breakpoint, stop/assertion.
902 */
903 case VINF_EM_DBG_STEPPED:
904 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
905 break;
906
907 case VINF_EM_DBG_BREAKPOINT:
908 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
909 break;
910
911 case VINF_EM_DBG_STOP:
912 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
913 break;
914
915 case VINF_EM_DBG_EVENT:
916 rc = DBGFR3EventHandlePending(pVM, pVCpu);
917 break;
918
919 case VINF_EM_DBG_HYPER_STEPPED:
920 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
921 break;
922
923 case VINF_EM_DBG_HYPER_BREAKPOINT:
924 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
925 break;
926
927 case VINF_EM_DBG_HYPER_ASSERTION:
928 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
929 RTLogFlush(NULL);
930 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
931 break;
932
933 /*
934 * Guru meditation.
935 */
936 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
937 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
938 break;
939 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
940 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
941 break;
942 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
943 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
944 break;
945
946 default: /** @todo don't use default for guru, but make special errors code! */
947 {
948 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
949 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
950 break;
951 }
952 }
953
954 /*
955 * Process the result.
956 */
957 switch (VBOXSTRICTRC_VAL(rc))
958 {
959 /*
960 * Continue the debugging loop.
961 */
962 case VINF_EM_DBG_STEP:
963 case VINF_EM_DBG_STOP:
964 case VINF_EM_DBG_EVENT:
965 case VINF_EM_DBG_STEPPED:
966 case VINF_EM_DBG_BREAKPOINT:
967 case VINF_EM_DBG_HYPER_STEPPED:
968 case VINF_EM_DBG_HYPER_BREAKPOINT:
969 case VINF_EM_DBG_HYPER_ASSERTION:
970 break;
971
972 /*
973 * Resuming execution (in some form) has to be done here if we got
974 * a hypervisor debug event.
975 */
976 case VINF_SUCCESS:
977 case VINF_EM_RESUME:
978 case VINF_EM_SUSPEND:
979 case VINF_EM_RESCHEDULE:
980 case VINF_EM_RESCHEDULE_RAW:
981 case VINF_EM_RESCHEDULE_REM:
982 case VINF_EM_HALT:
983 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
984 {
985#ifdef VBOX_WITH_RAW_MODE
986 rc = emR3RawResumeHyper(pVM, pVCpu);
987 if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
988 continue;
989#else
990 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
991#endif
992 }
993 if (rc == VINF_SUCCESS)
994 rc = VINF_EM_RESCHEDULE;
995 return rc;
996
997 /*
998 * The debugger isn't attached.
999 * We'll simply turn the thing off since that's the easiest thing to do.
1000 */
1001 case VERR_DBGF_NOT_ATTACHED:
1002 switch (VBOXSTRICTRC_VAL(rcLast))
1003 {
1004 case VINF_EM_DBG_HYPER_STEPPED:
1005 case VINF_EM_DBG_HYPER_BREAKPOINT:
1006 case VINF_EM_DBG_HYPER_ASSERTION:
1007 case VERR_TRPM_PANIC:
1008 case VERR_TRPM_DONT_PANIC:
1009 case VERR_VMM_RING0_ASSERTION:
1010 case VERR_VMM_HYPER_CR3_MISMATCH:
1011 case VERR_VMM_RING3_CALL_DISABLED:
1012 return rcLast;
1013 }
1014 return VINF_EM_OFF;
1015
1016 /*
1017 * Status codes terminating the VM in one or another sense.
1018 */
1019 case VINF_EM_TERMINATE:
1020 case VINF_EM_OFF:
1021 case VINF_EM_RESET:
1022 case VINF_EM_NO_MEMORY:
1023 case VINF_EM_RAW_STALE_SELECTOR:
1024 case VINF_EM_RAW_IRET_TRAP:
1025 case VERR_TRPM_PANIC:
1026 case VERR_TRPM_DONT_PANIC:
1027 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
1028 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
1029 case VERR_VMM_RING0_ASSERTION:
1030 case VERR_VMM_HYPER_CR3_MISMATCH:
1031 case VERR_VMM_RING3_CALL_DISABLED:
1032 case VERR_INTERNAL_ERROR:
1033 case VERR_INTERNAL_ERROR_2:
1034 case VERR_INTERNAL_ERROR_3:
1035 case VERR_INTERNAL_ERROR_4:
1036 case VERR_INTERNAL_ERROR_5:
1037 case VERR_IPE_UNEXPECTED_STATUS:
1038 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1039 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1040 return rc;
1041
1042 /*
1043 * The rest is unexpected, and will keep us here.
1044 */
1045 default:
1046 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1047 break;
1048 }
1049 } /* debug for ever */
1050}
1051
1052
1053#if defined(VBOX_WITH_REM) || defined(DEBUG)
1054/**
1055 * Steps recompiled code.
1056 *
1057 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1058 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1059 *
1060 * @param pVM The cross context VM structure.
1061 * @param pVCpu The cross context virtual CPU structure.
1062 */
1063static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1064{
1065 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1066
1067# ifdef VBOX_WITH_REM
1068 EMRemLock(pVM);
1069
1070 /*
1071 * Switch to REM, step instruction, switch back.
1072 */
1073 int rc = REMR3State(pVM, pVCpu);
1074 if (RT_SUCCESS(rc))
1075 {
1076 rc = REMR3Step(pVM, pVCpu);
1077 REMR3StateBack(pVM, pVCpu);
1078 }
1079 EMRemUnlock(pVM);
1080
1081# else
1082 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1083# endif
1084
1085 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1086 return rc;
1087}
1088#endif /* VBOX_WITH_REM || DEBUG */
1089
1090
1091#ifdef VBOX_WITH_REM
1092/**
 1093 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
1094 * critical section.
1095 *
1096 * @returns false - new fInREMState value.
1097 * @param pVM The cross context VM structure.
1098 * @param pVCpu The cross context virtual CPU structure.
1099 */
1100DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1101{
1102 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1103 REMR3StateBack(pVM, pVCpu);
1104 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1105
1106 EMRemUnlock(pVM);
1107 return false;
1108}
1109#endif
1110
1111
1112/**
1113 * Executes recompiled code.
1114 *
1115 * This function contains the recompiler version of the inner
1116 * execution loop (the outer loop being in EMR3ExecuteVM()).
1117 *
1118 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1119 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1120 *
1121 * @param pVM The cross context VM structure.
1122 * @param pVCpu The cross context virtual CPU structure.
1123 * @param pfFFDone Where to store an indicator telling whether or not
1124 * FFs were done before returning.
1125 *
1126 */
1127static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1128{
1129#ifdef LOG_ENABLED
1130 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1131
1132 if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1133 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1134 else
1135 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1136#endif
1137 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1138
1139#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1140 AssertMsg( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1141 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1142 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1143#endif
1144
1145 /*
1146 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1147 * or the REM suggests raw-mode execution.
1148 */
1149 *pfFFDone = false;
1150#ifdef VBOX_WITH_REM
1151 bool fInREMState = false;
1152#else
1153 uint32_t cLoops = 0;
1154#endif
1155 int rc = VINF_SUCCESS;
1156 for (;;)
1157 {
1158#ifdef VBOX_WITH_REM
1159 /*
1160 * Lock REM and update the state if not already in sync.
1161 *
1162 * Note! Big lock, but you are not supposed to own any lock when
1163 * coming in here.
1164 */
1165 if (!fInREMState)
1166 {
1167 EMRemLock(pVM);
1168 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1169
1170 /* Flush the recompiler translation blocks if the VCPU has changed,
1171 also force a full CPU state resync. */
1172 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1173 {
1174 REMFlushTBs(pVM);
1175 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1176 }
1177 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1178
1179 rc = REMR3State(pVM, pVCpu);
1180
1181 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1182 if (RT_FAILURE(rc))
1183 break;
1184 fInREMState = true;
1185
1186 /*
1187 * We might have missed the raising of VMREQ, TIMER and some other
1188 * important FFs while we were busy switching the state. So, check again.
1189 */
1190 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1191 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1192 {
1193 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1194 goto l_REMDoForcedActions;
1195 }
1196 }
1197#endif
1198
1199 /*
1200 * Execute REM.
1201 */
1202 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1203 {
1204 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1205#ifdef VBOX_WITH_REM
1206 rc = REMR3Run(pVM, pVCpu);
1207#else
1208 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 8192 /*cMaxInstructions*/, 4095 /*cPollRate*/, NULL /*pcInstructions*/));
1209#endif
1210 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1211 }
1212 else
1213 {
1214 /* Give up this time slice; virtual time continues */
1215 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1216 RTThreadSleep(5);
1217 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1218 rc = VINF_SUCCESS;
1219 }
1220
1221 /*
1222 * Deal with high priority post execution FFs before doing anything
1223 * else. Sync back the state and leave the lock to be on the safe side.
1224 */
1225 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1226 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1227 {
1228#ifdef VBOX_WITH_REM
1229 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1230#endif
1231 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1232 }
1233
1234 /*
1235 * Process the returned status code.
1236 */
1237 if (rc != VINF_SUCCESS)
1238 {
1239 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1240 break;
1241 if (rc != VINF_REM_INTERRUPED_FF)
1242 {
1243#ifndef VBOX_WITH_REM
 1244 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1245 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1246 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1247 {
1248 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1249 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1250 {
1251 rc = VINF_EM_RESCHEDULE;
1252 break;
1253 }
1254 }
1255#endif
1256
1257 /*
1258 * Anything which is not known to us means an internal error
1259 * and the termination of the VM!
1260 */
1261 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1262 break;
1263 }
1264 }
1265
1266
1267 /*
1268 * Check and execute forced actions.
1269 *
1270 * Sync back the VM state and leave the lock before calling any of
1271 * these, you never know what's going to happen here.
1272 */
1273#ifdef VBOX_HIGH_RES_TIMERS_HACK
1274 TMTimerPollVoid(pVM, pVCpu);
1275#endif
1276 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1277 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1278 || VMCPU_FF_IS_ANY_SET(pVCpu,
1279 VMCPU_FF_ALL_REM_MASK
1280 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1281 {
1282#ifdef VBOX_WITH_REM
1283l_REMDoForcedActions:
1284 if (fInREMState)
1285 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1286#endif
1287 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1288 rc = emR3ForcedActions(pVM, pVCpu, rc);
1289 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1290 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1291 if ( rc != VINF_SUCCESS
1292 && rc != VINF_EM_RESCHEDULE_REM)
1293 {
1294 *pfFFDone = true;
1295 break;
1296 }
1297 }
1298
1299#ifndef VBOX_WITH_REM
1300 /*
1301 * Have to check if we can get back to fast execution mode every so often.
1302 */
1303 if (!(++cLoops & 7))
1304 {
1305 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
1306 if ( enmCheck != EMSTATE_REM
1307 && enmCheck != EMSTATE_IEM_THEN_REM)
1308 return VINF_EM_RESCHEDULE;
1309 }
1310#endif
1311
1312 } /* The Inner Loop, recompiled execution mode version. */
1313
1314
1315#ifdef VBOX_WITH_REM
1316 /*
1317 * Returning. Sync back the VM state if required.
1318 */
1319 if (fInREMState)
1320 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1321#endif
1322
1323 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1324 return rc;
1325}
1326
1327
1328#ifdef DEBUG
1329
1330int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1331{
1332 EMSTATE enmOldState = pVCpu->em.s.enmState;
1333
1334 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1335
1336 Log(("Single step BEGIN:\n"));
1337 for (uint32_t i = 0; i < cIterations; i++)
1338 {
1339 DBGFR3PrgStep(pVCpu);
1340 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1341 emR3RemStep(pVM, pVCpu);
1342 if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
1343 break;
1344 }
1345 Log(("Single step END:\n"));
1346 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1347 pVCpu->em.s.enmState = enmOldState;
1348 return VINF_EM_RESCHEDULE;
1349}
1350
1351#endif /* DEBUG */
1352
1353
1354/**
 1355 * Try to execute the problematic code in IEM first, falling back on REM if there
1356 * is too much of it or if IEM doesn't implement something.
1357 *
1358 * @returns Strict VBox status code from IEMExecLots.
1359 * @param pVM The cross context VM structure.
1360 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1361 * @param pfFFDone Force flags done indicator.
1362 *
1363 * @thread EMT(pVCpu)
1364 */
1365static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1366{
1367 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1368 *pfFFDone = false;
1369
1370 /*
1371 * Execute in IEM for a while.
1372 */
1373 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1374 {
1375 uint32_t cInstructions;
1376 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, 1024 - pVCpu->em.s.cIemThenRemInstructions /*cMaxInstructions*/,
1377 UINT32_MAX/2 /*cPollRate*/, &cInstructions);
1378 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1379 if (rcStrict != VINF_SUCCESS)
1380 {
1381 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1382 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1383 break;
1384
1385 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1386 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1387 return rcStrict;
1388 }
1389
1390 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1391 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1392 {
1393 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1394 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1395 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1396 pVCpu->em.s.enmState = enmNewState;
1397 return VINF_SUCCESS;
1398 }
1399
1400 /*
1401 * Check for pending actions.
1402 */
1403 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1404 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1405 return VINF_SUCCESS;
1406 }
1407
1408 /*
1409 * Switch to REM.
1410 */
1411 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1412 pVCpu->em.s.enmState = EMSTATE_REM;
1413 return VINF_SUCCESS;
1414}
1415
1416
1417/**
1418 * Decides whether to execute RAW, HWACC or REM.
1419 *
1420 * @returns new EM state
1421 * @param pVM The cross context VM structure.
1422 * @param pVCpu The cross context virtual CPU structure.
1423 */
1424EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1425{
1426 /*
1427 * When forcing raw-mode execution, things are simple.
1428 */
1429 if (pVCpu->em.s.fForceRAW)
1430 return EMSTATE_RAW;
1431
1432 /*
1433 * We stay in the wait for SIPI state unless explicitly told otherwise.
1434 */
1435 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1436 return EMSTATE_WAIT_SIPI;
1437
1438 /*
1439 * Execute everything in IEM?
1440 */
1441 if (pVM->em.s.fIemExecutesAll)
1442 return EMSTATE_IEM;
1443
1444 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1445 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1446 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1447
1448 X86EFLAGS EFlags = pVCpu->cpum.GstCtx.eflags;
1449 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1450 {
1451 if (EMIsHwVirtExecutionEnabled(pVM))
1452 {
1453 if (VM_IS_HM_ENABLED(pVM))
1454 {
1455 if (HMCanExecuteGuest(pVCpu, &pVCpu->cpum.GstCtx))
1456 return EMSTATE_HM;
1457 }
1458 else if (NEMR3CanExecuteGuest(pVM, pVCpu))
1459 return EMSTATE_NEM;
1460
1461 /*
1462 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1463 * turns off monitoring features essential for raw mode!
1464 */
1465 return EMSTATE_IEM_THEN_REM;
1466 }
1467 }
1468
1469 /*
1470 * Standard raw-mode:
1471 *
 1472 * Here we only support 16 and 32-bit protected mode ring-3 code without I/O privileges,
 1473 * or 32-bit protected mode ring-0 code.
1474 *
1475 * The tests are ordered by the likelihood of being true during normal execution.
1476 */
1477 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1478 {
1479 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1480 return EMSTATE_REM;
1481 }
1482
1483# ifndef VBOX_RAW_V86
1484 if (EFlags.u32 & X86_EFL_VM) {
1485 Log2(("raw mode refused: VM_MASK\n"));
1486 return EMSTATE_REM;
1487 }
1488# endif
1489
1490 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1491 uint32_t u32CR0 = pVCpu->cpum.GstCtx.cr0;
1492 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1493 {
1494 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1495 return EMSTATE_REM;
1496 }
1497
1498 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
1499 {
1500 uint32_t u32Dummy, u32Features;
1501
1502 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1503 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1504 return EMSTATE_REM;
1505 }
1506
1507 unsigned uSS = pVCpu->cpum.GstCtx.ss.Sel;
1508 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
1509 || (uSS & X86_SEL_RPL) == 3)
1510 {
1511 if (!EMIsRawRing3Enabled(pVM))
1512 return EMSTATE_REM;
1513
1514 if (!(EFlags.u32 & X86_EFL_IF))
1515 {
1516 Log2(("raw mode refused: IF (RawR3)\n"));
1517 return EMSTATE_REM;
1518 }
1519
1520 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1521 {
1522 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1523 return EMSTATE_REM;
1524 }
1525 }
1526 else
1527 {
1528 if (!EMIsRawRing0Enabled(pVM))
1529 return EMSTATE_REM;
1530
1531 if (EMIsRawRing1Enabled(pVM))
1532 {
1533 /* Only ring 0 and 1 supervisor code. */
1534 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1535 {
1536 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1537 return EMSTATE_REM;
1538 }
1539 }
1540 /* Only ring 0 supervisor code. */
1541 else if ((uSS & X86_SEL_RPL) != 0)
1542 {
1543 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1544 return EMSTATE_REM;
1545 }
1546
1547 // Let's start with pure 32 bits ring 0 code first
1548 /** @todo What's pure 32-bit mode? flat? */
1549 if ( !(pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1550 || !(pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
1551 {
1552 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1553 return EMSTATE_REM;
1554 }
1555
1556 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1557 if (!(u32CR0 & X86_CR0_WP))
1558 {
1559 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1560 return EMSTATE_REM;
1561 }
1562
1563# ifdef VBOX_WITH_RAW_MODE
1564 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pVCpu->cpum.GstCtx.eip))
1565 {
1566 Log2(("raw r0 mode forced: patch code\n"));
1567# ifdef VBOX_WITH_SAFE_STR
1568 Assert(pVCpu->cpum.GstCtx.tr.Sel);
1569# endif
1570 return EMSTATE_RAW;
1571 }
1572# endif /* VBOX_WITH_RAW_MODE */
1573
1574# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1575 if (!(EFlags.u32 & X86_EFL_IF))
1576 {
1577 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1578 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1579 return EMSTATE_REM;
1580 }
1581# endif
1582
1583# ifndef VBOX_WITH_RAW_RING1
1584 /** @todo still necessary??? */
1585 if (EFlags.Bits.u2IOPL != 0)
1586 {
1587 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1588 return EMSTATE_REM;
1589 }
1590# endif
1591 }
1592
1593 /*
1594     * Stale hidden selectors mean raw-mode is unsafe (being very careful).
1595 */
1596 if (pVCpu->cpum.GstCtx.cs.fFlags & CPUMSELREG_FLAGS_STALE)
1597 {
1598 Log2(("raw mode refused: stale CS\n"));
1599 return EMSTATE_REM;
1600 }
1601 if (pVCpu->cpum.GstCtx.ss.fFlags & CPUMSELREG_FLAGS_STALE)
1602 {
1603 Log2(("raw mode refused: stale SS\n"));
1604 return EMSTATE_REM;
1605 }
1606 if (pVCpu->cpum.GstCtx.ds.fFlags & CPUMSELREG_FLAGS_STALE)
1607 {
1608 Log2(("raw mode refused: stale DS\n"));
1609 return EMSTATE_REM;
1610 }
1611 if (pVCpu->cpum.GstCtx.es.fFlags & CPUMSELREG_FLAGS_STALE)
1612 {
1613 Log2(("raw mode refused: stale ES\n"));
1614 return EMSTATE_REM;
1615 }
1616 if (pVCpu->cpum.GstCtx.fs.fFlags & CPUMSELREG_FLAGS_STALE)
1617 {
1618 Log2(("raw mode refused: stale FS\n"));
1619 return EMSTATE_REM;
1620 }
1621 if (pVCpu->cpum.GstCtx.gs.fFlags & CPUMSELREG_FLAGS_STALE)
1622 {
1623 Log2(("raw mode refused: stale GS\n"));
1624 return EMSTATE_REM;
1625 }
1626
1627# ifdef VBOX_WITH_SAFE_STR
1628 if (pVCpu->cpum.GstCtx.tr.Sel == 0)
1629 {
1630 Log(("Raw mode refused -> TR=0\n"));
1631 return EMSTATE_REM;
1632 }
1633# endif
1634
1635 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1636 return EMSTATE_RAW;
1637}
1638
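/*
 * Summary of emR3Reschedule's decision order (derived from the checks above):
 * forced raw-mode and the wait-for-SIPI state win outright; fIemExecutesAll
 * sends everything to IEM; on non-raw-mode VMs we pick HM or NEM when they
 * can execute the current guest state and otherwise fall back to
 * IEM_THEN_REM; only raw-mode capable VMs go through the long list of
 * ring/flag/selector checks, with REM as the fallback whenever any of them
 * refuses raw-mode execution.
 */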
1639
1640/**
1641 * Executes all high priority post execution force actions.
1642 *
1643 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1644 * fatal error status code.
1645 *
1646 * @param pVM The cross context VM structure.
1647 * @param pVCpu The cross context virtual CPU structure.
1648 * @param rc The current strict VBox status code rc.
1649 */
1650VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1651{
1652 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1653
1654 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1655 PDMCritSectBothFF(pVCpu);
1656
1657 /* Update CR3 (Nested Paging case for HM). */
1658 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1659 {
1660 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1661 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1662 if (RT_FAILURE(rc2))
1663 return rc2;
1664 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1665 }
1666
1667    /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and is used only by the Nested Paging case for HM. */
1668 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1669 {
1670 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1671 if (CPUMIsGuestInPAEMode(pVCpu))
1672 {
1673 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1674 AssertPtr(pPdpes);
1675
1676 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1677 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1678 }
1679 else
1680 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1681 }
1682
1683 /* IEM has pending work (typically memory write after INS instruction). */
1684 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1685 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1686
1687    /* IOM has pending work (committing an I/O or MMIO write). */
1688 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1689 {
1690 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1691 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1692 { /* half likely, or at least it's a line shorter. */ }
1693 else if (rc == VINF_SUCCESS)
1694 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1695 else
1696 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1697 }
1698
1699#ifdef VBOX_WITH_RAW_MODE
1700 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1701 CSAMR3DoPendingAction(pVM, pVCpu);
1702#endif
1703
1704 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1705 {
1706 if ( rc > VINF_EM_NO_MEMORY
1707 && rc <= VINF_EM_LAST)
1708 rc = VINF_EM_NO_MEMORY;
1709 }
1710
1711 return rc;
1712}
1713
1714
1715/**
1716 * Helper for emR3ForcedActions() for VMX external interrupt VM-exit.
1717 *
1718 * @returns VBox status code.
1719 * @retval VINF_NO_CHANGE if the VMX external interrupt intercept was not active.
1720 * @param pVCpu The cross context virtual CPU structure.
1721 */
1722static int emR3VmxNstGstIntrIntercept(PVMCPU pVCpu)
1723{
1724#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1725 /* Handle the "external interrupt" VM-exit intercept. */
1726 if ( CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_EXT_INT_EXIT)
1727 && !CPUMIsGuestVmxExitCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_EXIT_CTLS_ACK_EXT_INT))
1728 {
1729 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitExtInt(pVCpu, 0 /* uVector */, true /* fIntPending */);
1730 AssertMsg( rcStrict != VINF_PGM_CHANGE_MODE
1731 && rcStrict != VINF_VMX_VMEXIT
1732 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1733 if (rcStrict != VINF_VMX_INTERCEPT_NOT_ACTIVE)
1734 return VBOXSTRICTRC_TODO(rcStrict);
1735 }
1736#else
1737 RT_NOREF(pVCpu);
1738#endif
1739 return VINF_NO_CHANGE;
1740}
1741
1742
1743/**
1744 * Helper for emR3ForcedActions() for SVM interrupt intercept.
1745 *
1746 * @returns VBox status code.
1747 * @retval VINF_NO_CHANGE if the SVM external interrupt intercept was not active.
1748 * @param pVCpu The cross context virtual CPU structure.
1749 */
1750static int emR3SvmNstGstIntrIntercept(PVMCPU pVCpu)
1751{
1752#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1753 /* Handle the physical interrupt intercept (can be masked by the guest hypervisor). */
1754 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1755 {
1756 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1757 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1758 if (RT_SUCCESS(rcStrict))
1759 {
1760 AssertMsg( rcStrict != VINF_PGM_CHANGE_MODE
1761 && rcStrict != VINF_SVM_VMEXIT
1762 && rcStrict != VINF_NO_CHANGE, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1763 return VBOXSTRICTRC_VAL(rcStrict);
1764 }
1765
1766 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1767 return VINF_EM_TRIPLE_FAULT;
1768 }
1769#else
1770 NOREF(pVCpu);
1771#endif
1772 return VINF_NO_CHANGE;
1773}
1774
1775
1776/**
1777 * Helper for emR3ForcedActions() for SVM virtual interrupt intercept.
1778 *
1779 * @returns VBox status code.
1780 * @retval VINF_NO_CHANGE if the SVM virtual interrupt intercept was not active.
1781 * @param pVCpu The cross context virtual CPU structure.
1782 */
1783static int emR3SvmNstGstVirtIntrIntercept(PVMCPU pVCpu)
1784{
1785#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1786 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1787 {
1788 CPUM_ASSERT_NOT_EXTRN(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1789 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1790 if (RT_SUCCESS(rcStrict))
1791 {
1792 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1793 Assert(rcStrict != VINF_SVM_VMEXIT);
1794 return VBOXSTRICTRC_VAL(rcStrict);
1795 }
1796 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1797 return VINF_EM_TRIPLE_FAULT;
1798 }
1799#else
1800 NOREF(pVCpu);
1801#endif
1802 return VINF_NO_CHANGE;
1803}
1804
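/*
 * Note on the three nested-guest intercept helpers above: returning
 * VINF_NO_CHANGE tells the caller (emR3ForcedActions) that no intercept was
 * taken, i.e. the pending interrupt can be delivered directly to the
 * (nested-)guest via TRPM instead of causing a VM-exit / #VMEXIT.
 */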
1805
1806/**
1807 * Executes all pending forced actions.
1808 *
1809 * Forced actions can cause execution delays and execution
1810 * rescheduling.  The former we deal with using action priority, so
1811 * that for instance pending timers aren't scheduled and run until
1812 * right before execution. The rescheduling we deal with using
1813 * return codes. The same goes for VM termination, only in that case
1814 * we exit everything.
1815 *
1816 * @returns VBox status code of equal or greater importance/severity than rc.
1817 * The most important ones are: VINF_EM_RESCHEDULE,
1818 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1819 *
1820 * @param pVM The cross context VM structure.
1821 * @param pVCpu The cross context virtual CPU structure.
1822 * @param rc The current rc.
1823 *
1824 */
1825int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1826{
1827 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1828#ifdef VBOX_STRICT
1829 int rcIrq = VINF_SUCCESS;
1830#endif
1831 int rc2;
1832#define UPDATE_RC() \
1833 do { \
1834 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1835 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1836 break; \
1837 if (!rc || rc2 < rc) \
1838 rc = rc2; \
1839 } while (0)
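    /* Note: UPDATE_RC() keeps the numerically smallest positive status code,
       i.e. among the VINF_EM_* codes a lower value is treated as the more
       important request, so e.g. a pending suspend/terminate request is not
       overwritten by a mere reschedule. */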
1840 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1841
1842 /*
1843 * Post execution chunk first.
1844 */
1845 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1846 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1847 {
1848 /*
1849 * EMT Rendezvous (must be serviced before termination).
1850 */
1851 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1852 {
1853 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1854 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1855 UPDATE_RC();
1856 /** @todo HACK ALERT! The following test is to make sure EM+TM
1857 * thinks the VM is stopped/reset before the next VM state change
1858 * is made. We need a better solution for this, or at least make it
1859 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1860 * VINF_EM_SUSPEND). */
1861 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1862 {
1863 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1864 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1865 return rc;
1866 }
1867 }
1868
1869 /*
1870 * State change request (cleared by vmR3SetStateLocked).
1871 */
1872 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1873 {
1874 VMSTATE enmState = VMR3GetState(pVM);
1875 switch (enmState)
1876 {
1877 case VMSTATE_FATAL_ERROR:
1878 case VMSTATE_FATAL_ERROR_LS:
1879 case VMSTATE_GURU_MEDITATION:
1880 case VMSTATE_GURU_MEDITATION_LS:
1881 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1882 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1883 return VINF_EM_SUSPEND;
1884
1885 case VMSTATE_DESTROYING:
1886 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1887 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1888 return VINF_EM_TERMINATE;
1889
1890 default:
1891 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1892 }
1893 }
1894
1895 /*
1896 * Debugger Facility polling.
1897 */
1898 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
1899 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
1900 {
1901 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1902 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1903 UPDATE_RC();
1904 }
1905
1906 /*
1907 * Postponed reset request.
1908 */
1909 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1910 {
1911 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1912 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1913 UPDATE_RC();
1914 }
1915
1916#ifdef VBOX_WITH_RAW_MODE
1917 /*
1918 * CSAM page scanning.
1919 */
1920 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
1921 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1922 {
1923 /** @todo check for 16 or 32 bits code! (D bit in the code selector) */
1924 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1925 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1926 CSAMR3CheckCodeEx(pVM, &pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.eip);
1927 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1928 }
1929#endif
1930
1931 /*
1932 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1933 */
1934 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1935 {
1936 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1937 UPDATE_RC();
1938 if (rc == VINF_EM_NO_MEMORY)
1939 return rc;
1940 }
1941
1942 /* check that we got them all */
1943 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1944 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
1945 }
1946
1947 /*
1948 * Normal priority then.
1949 * (Executed in no particular order.)
1950 */
1951 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1952 {
1953 /*
1954 * PDM Queues are pending.
1955 */
1956 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1957 PDMR3QueueFlushAll(pVM);
1958
1959 /*
1960 * PDM DMA transfers are pending.
1961 */
1962 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1963 PDMR3DmaRun(pVM);
1964
1965 /*
1966 * EMT Rendezvous (make sure they are handled before the requests).
1967 */
1968 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1969 {
1970 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1971 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1972 UPDATE_RC();
1973 /** @todo HACK ALERT! The following test is to make sure EM+TM
1974 * thinks the VM is stopped/reset before the next VM state change
1975 * is made. We need a better solution for this, or at least make it
1976 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1977 * VINF_EM_SUSPEND). */
1978 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1979 {
1980 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1981 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1982 return rc;
1983 }
1984 }
1985
1986 /*
1987 * Requests from other threads.
1988 */
1989 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1990 {
1991 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1992 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1993 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1994 {
1995 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1996 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1997 return rc2;
1998 }
1999 UPDATE_RC();
2000 /** @todo HACK ALERT! The following test is to make sure EM+TM
2001 * thinks the VM is stopped/reset before the next VM state change
2002 * is made. We need a better solution for this, or at least make it
2003 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2004 * VINF_EM_SUSPEND). */
2005 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2006 {
2007 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2008 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2009 return rc;
2010 }
2011 }
2012
2013#ifdef VBOX_WITH_REM
2014 /* Replay the handler notification changes. */
2015 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
2016 {
2017 /* Try not to cause deadlocks. */
2018 if ( pVM->cCpus == 1
2019 || ( !PGMIsLockOwner(pVM)
2020 && !IOMIsLockWriteOwner(pVM))
2021 )
2022 {
2023 EMRemLock(pVM);
2024 REMR3ReplayHandlerNotifications(pVM);
2025 EMRemUnlock(pVM);
2026 }
2027 }
2028#endif
2029
2030 /* check that we got them all */
2031 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
2032 }
2033
2034 /*
2035 * Normal priority then. (per-VCPU)
2036 * (Executed in no particular order.)
2037 */
2038 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
2039 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
2040 {
2041 /*
2042 * Requests from other threads.
2043 */
2044 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
2045 {
2046 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2047 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
2048 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
2049 {
2050 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2051 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2052 return rc2;
2053 }
2054 UPDATE_RC();
2055 /** @todo HACK ALERT! The following test is to make sure EM+TM
2056 * thinks the VM is stopped/reset before the next VM state change
2057 * is made. We need a better solution for this, or at least make it
2058 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2059 * VINF_EM_SUSPEND). */
2060 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2061 {
2062 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2063 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2064 return rc;
2065 }
2066 }
2067
2068 /* check that we got them all */
2069 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
2070 }
2071
2072 /*
2073 * High priority pre execution chunk last.
2074 * (Executed in ascending priority order.)
2075 */
2076 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
2077 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
2078 {
2079 /*
2080 * Timers before interrupts.
2081 */
2082 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
2083 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2084 TMR3TimerQueuesDo(pVM);
2085
2086 /*
2087 * Pick up asynchronously posted interrupts into the APIC.
2088 */
2089 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2090 APICUpdatePendingInterrupts(pVCpu);
2091
2092 /*
2093 * The instruction following an emulated STI should *always* be executed!
2094 *
2095         * Note! We intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if
2096         *       the eip is the same as the inhibited instr address.  Before we
2097         *       are able to execute this instruction in raw mode (iret to
2098         *       guest code) an external interrupt might force a world switch
2099         *       again, possibly allowing a guest interrupt to be dispatched
2100         *       in the process.  This could break the guest.  Sounds very
2101         *       unlikely, but such timing-sensitive problems are not as rare as
2102         *       you might think.
2103 */
2104 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2105 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2106 {
2107 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
2108 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
2109 {
2110 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
2111 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2112 }
2113 else
2114 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
2115 }
2116
2117 /** @todo SMIs. If we implement SMIs, this is where they will have to be
2118 * delivered. */
2119
2120#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2121 /*
2122 * VMX Nested-guest APIC-write pending (can cause VM-exits).
2123 * Takes priority over even SMI and INIT signals.
2124 * See Intel spec. 29.4.3.2 "APIC-Write Emulation".
2125 */
2126 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_APIC_WRITE))
2127 {
2128 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitApicWrite(pVCpu));
2129 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
2130 UPDATE_RC();
2131 }
2132
2133 /*
2134 * VMX Nested-guest monitor-trap flag (MTF) VM-exit.
2135 * Takes priority over "Traps on the previous instruction".
2136 * See Intel spec. 6.9 "Priority Among Simultaneous Exceptions And Interrupts".
2137 */
2138 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_MTF))
2139 {
2140 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_MTF, 0 /* uExitQual */));
2141 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
2142 UPDATE_RC();
2143 }
2144
2145 /*
2146 * VMX Nested-guest preemption timer VM-exit.
2147 * Takes priority over NMI-window VM-exits.
2148 */
2149 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_PREEMPT_TIMER))
2150 {
2151 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitPreemptTimer(pVCpu));
2152 if (rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE)
2153 UPDATE_RC();
2154 }
2155#endif
2156
2157 /*
2158 * Guest event injection.
2159 */
2160 bool fWakeupPending = false;
2161 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
2162 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
2163 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS) /* Interrupt shadows block both NMIs and interrupts. */
2164 && !TRPMHasTrap(pVCpu)) /* An event could already be scheduled for dispatching. */
2165 {
2166 bool fInVmxNonRootMode;
2167 bool fInSvmHwvirtMode;
2168 bool const fInNestedGuest = CPUMIsGuestInNestedHwvirtMode(&pVCpu->cpum.GstCtx);
2169 if (fInNestedGuest)
2170 {
2171 fInVmxNonRootMode = CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx);
2172 fInSvmHwvirtMode = CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx);
2173 }
2174 else
2175 {
2176 fInVmxNonRootMode = false;
2177 fInSvmHwvirtMode = false;
2178 }
2179
2180 bool fGif = CPUMGetGuestGif(&pVCpu->cpum.GstCtx);
2181#ifdef VBOX_WITH_RAW_MODE
2182 fGif &= !PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip);
2183#endif
2184 if (fGif)
2185 {
2186#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2187 /*
2188 * VMX NMI-window VM-exit.
2189 * Takes priority over non-maskable interrupts (NMIs).
2190 * Interrupt shadows block NMI-window VM-exits.
2191 * Any event that is already in TRPM (e.g. injected during VM-entry) takes priority.
2192 *
2193 * See Intel spec. 25.2 "Other Causes Of VM Exits".
2194 * See Intel spec. 26.7.6 "NMI-Window Exiting".
2195 */
2196 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_NMI_WINDOW)
2197 && !CPUMIsGuestVmxVirtNmiBlocking(pVCpu, &pVCpu->cpum.GstCtx))
2198 {
2199 Assert(CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_NMI_WINDOW_EXIT));
2200 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_NMI_WINDOW, 0 /* uExitQual */));
2201 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
2202 && rc2 != VINF_PGM_CHANGE_MODE
2203 && rc2 != VINF_VMX_VMEXIT
2204 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
2205 UPDATE_RC();
2206 }
2207 else
2208#endif
2209 /*
2210 * NMIs (take priority over external interrupts).
2211 */
2212 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)
2213 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_BLOCK_NMIS))
2214 {
2215#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2216 if ( fInVmxNonRootMode
2217 && CPUMIsGuestVmxPinCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PIN_CTLS_NMI_EXIT))
2218 {
2219 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexitXcptNmi(pVCpu));
2220 Assert(rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE);
2221 UPDATE_RC();
2222 }
2223 else
2224#endif
2225#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2226 if ( fInSvmHwvirtMode
2227 && CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_NMI))
2228 {
2229 rc2 = VBOXSTRICTRC_VAL(IEMExecSvmVmexit(pVCpu, SVM_EXIT_NMI, 0 /* uExitInfo1 */, 0 /* uExitInfo2 */));
2230 AssertMsg( rc2 != VINF_PGM_CHANGE_MODE
2231 && rc2 != VINF_SVM_VMEXIT
2232 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
2233 UPDATE_RC();
2234 }
2235 else
2236#endif
2237 {
2238 rc2 = TRPMAssertTrap(pVCpu, X86_XCPT_NMI, TRPM_TRAP);
2239 if (rc2 == VINF_SUCCESS)
2240 {
2241 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
2242 fWakeupPending = true;
2243 if (pVM->em.s.fIemExecutesAll)
2244 rc2 = VINF_EM_RESCHEDULE;
2245 else
2246 {
2247 rc2 = HMR3IsActive(pVCpu) ? VINF_EM_RESCHEDULE_HM
2248 : VM_IS_NEM_ENABLED(pVM) ? VINF_EM_RESCHEDULE
2249 : VINF_EM_RESCHEDULE_REM;
2250 }
2251 }
2252 UPDATE_RC();
2253 }
2254 }
2255#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2256 /*
2257 * VMX Interrupt-window VM-exits.
2258 * Takes priority over external interrupts.
2259 */
2260 else if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_VMX_INT_WINDOW)
2261 && CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
2262 {
2263 Assert(CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT));
2264 rc2 = VBOXSTRICTRC_VAL(IEMExecVmxVmexit(pVCpu, VMX_EXIT_INT_WINDOW, 0 /* uExitQual */));
2265 AssertMsg( rc2 != VINF_VMX_INTERCEPT_NOT_ACTIVE
2266 && rc2 != VINF_PGM_CHANGE_MODE
2267 && rc2 != VINF_VMX_VMEXIT
2268 && rc2 != VINF_NO_CHANGE, ("%Rrc\n", rc2));
2269 UPDATE_RC();
2270 }
2271#endif
2272#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2273 /** @todo NSTSVM: Handle this for SVM here too later not when an interrupt is
2274 * actually pending like we currently do. */
2275#endif
2276 /*
2277 * External interrupts.
2278 */
2279 else
2280 {
2281 /*
2282                     * VMX: virtual interrupts take priority over physical interrupts.
2283                     * SVM: physical interrupts take priority over virtual interrupts.
2284 */
2285 if ( fInVmxNonRootMode
2286 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
2287 && CPUMIsGuestVmxVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
2288 {
2289 /** @todo NSTVMX: virtual-interrupt delivery. */
2290 rc2 = VINF_SUCCESS;
2291 }
2292 else if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2293 && CPUMIsGuestPhysIntrEnabled(pVCpu))
2294 {
2295 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2296 if (fInVmxNonRootMode)
2297 rc2 = emR3VmxNstGstIntrIntercept(pVCpu);
2298 else if (fInSvmHwvirtMode)
2299 rc2 = emR3SvmNstGstIntrIntercept(pVCpu);
2300 else
2301 rc2 = VINF_NO_CHANGE;
2302
2303 if (rc2 == VINF_NO_CHANGE)
2304 {
2305 bool fInjected = false;
2306 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2307 /** @todo this really isn't nice, should properly handle this */
2308 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT, &fInjected);
2309 fWakeupPending = true;
2310 if ( pVM->em.s.fIemExecutesAll
2311 && ( rc2 == VINF_EM_RESCHEDULE_REM
2312 || rc2 == VINF_EM_RESCHEDULE_HM
2313 || rc2 == VINF_EM_RESCHEDULE_RAW))
2314 {
2315 rc2 = VINF_EM_RESCHEDULE;
2316 }
2317#ifdef VBOX_STRICT
2318 if (fInjected)
2319 rcIrq = rc2;
2320#endif
2321 }
2322 UPDATE_RC();
2323 }
2324 else if ( fInSvmHwvirtMode
2325 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
2326 && CPUMIsGuestSvmVirtIntrEnabled(pVCpu, &pVCpu->cpum.GstCtx))
2327 {
2328 rc2 = emR3SvmNstGstVirtIntrIntercept(pVCpu);
2329 if (rc2 == VINF_NO_CHANGE)
2330 {
2331 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
2332 uint8_t const uNstGstVector = CPUMGetGuestSvmVirtIntrVector(&pVCpu->cpum.GstCtx);
2333 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR %#x\n", uNstGstVector));
2334 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
2335 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
2336 rc2 = VINF_EM_RESCHEDULE;
2337#ifdef VBOX_STRICT
2338 rcIrq = rc2;
2339#endif
2340 }
2341 UPDATE_RC();
2342 }
2343 }
2344 }
2345 }
2346
2347 /*
2348 * Allocate handy pages.
2349 */
2350 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2351 {
2352 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2353 UPDATE_RC();
2354 }
2355
2356 /*
2357 * Debugger Facility request.
2358 */
2359 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
2360 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
2361 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
2362 {
2363 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2364 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2365 UPDATE_RC();
2366 }
2367
2368 /*
2369 * EMT Rendezvous (must be serviced before termination).
2370 */
2371 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2372 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2373 {
2374 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2375 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2376 UPDATE_RC();
2377 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2378 * stopped/reset before the next VM state change is made. We need a better
2379 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2380         * && rc <= VINF_EM_SUSPEND). */
2381 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2382 {
2383 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2384 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2385 return rc;
2386 }
2387 }
2388
2389 /*
2390 * State change request (cleared by vmR3SetStateLocked).
2391 */
2392 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2393 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2394 {
2395 VMSTATE enmState = VMR3GetState(pVM);
2396 switch (enmState)
2397 {
2398 case VMSTATE_FATAL_ERROR:
2399 case VMSTATE_FATAL_ERROR_LS:
2400 case VMSTATE_GURU_MEDITATION:
2401 case VMSTATE_GURU_MEDITATION_LS:
2402 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2403 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2404 return VINF_EM_SUSPEND;
2405
2406 case VMSTATE_DESTROYING:
2407 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2408 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2409 return VINF_EM_TERMINATE;
2410
2411 default:
2412 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2413 }
2414 }
2415
2416 /*
2417 * Out of memory? Since most of our fellow high priority actions may cause us
2418 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2419 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2420 * than us since we can terminate without allocating more memory.
2421 */
2422 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2423 {
2424 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2425 UPDATE_RC();
2426 if (rc == VINF_EM_NO_MEMORY)
2427 return rc;
2428 }
2429
2430 /*
2431 * If the virtual sync clock is still stopped, make TM restart it.
2432 */
2433 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2434 TMR3VirtualSyncFF(pVM, pVCpu);
2435
2436#ifdef DEBUG
2437 /*
2438 * Debug, pause the VM.
2439 */
2440 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2441 {
2442 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2443 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2444 return VINF_EM_SUSPEND;
2445 }
2446#endif
2447
2448 /* check that we got them all */
2449 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2450 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VMCPU_FF_VMX_MTF | VMCPU_FF_VMX_APIC_WRITE | VMCPU_FF_VMX_PREEMPT_TIMER | VMCPU_FF_VMX_INT_WINDOW | VMCPU_FF_VMX_NMI_WINDOW | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2451 }
2452
2453#undef UPDATE_RC
2454 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2455 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2456 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2457 return rc;
2458}
2459
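/*
 * Rough processing order inside emR3ForcedActions above: the normal-priority
 * post-execution chunk (EMT rendezvous, VM state checks, DBGF, postponed
 * reset, out-of-memory) runs first, then the normal-priority VM-wide and
 * per-VCPU chunks (PDM queues/DMA, cross-thread requests), and finally the
 * high-priority pre-execution chunk (timers, APIC updates, interrupt-shadow
 * handling, nested-guest VM-exits and NMI/interrupt injection) right before
 * we return to executing guest code.
 */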
2460
2461/**
2462 * Check if the preset execution time cap restricts guest execution scheduling.
2463 *
2464 * @returns true if allowed, false otherwise
2465 * @param pVM The cross context VM structure.
2466 * @param pVCpu The cross context virtual CPU structure.
2467 */
2468bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2469{
2470 uint64_t u64UserTime, u64KernelTime;
2471
2472 if ( pVM->uCpuExecutionCap != 100
2473 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2474 {
2475 uint64_t u64TimeNow = RTTimeMilliTS();
2476 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2477 {
2478 /* New time slice. */
2479 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2480 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2481 pVCpu->em.s.u64TimeSliceExec = 0;
2482 }
2483 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2484
2485 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2486 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2487 return false;
2488 }
2489 return true;
2490}
2491
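/*
 * Illustration of the execution cap logic above (the concrete numbers are
 * assumptions for the example only): with EM_TIME_SLICE at 100 ms and
 * uCpuExecutionCap set to 50, the EMT may accumulate at most
 * 100 * 50 / 100 = 50 ms of combined kernel+user time per 100 ms wall-clock
 * slice; once u64TimeSliceExec reaches that budget, emR3IsExecutionAllowed
 * returns false until a new slice starts.
 */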
2492
2493/**
2494 * Execute VM.
2495 *
2496 * This function is the main loop of the VM. The emulation thread
2497 * calls this function when the VM has been successfully constructed
2498 * and we're ready for executing the VM.
2499 *
2500 * Returning from this function means that the VM is turned off or
2501 * suspended (state already saved) and deconstruction is next in line.
2502 *
2503 * All interaction from other threads is done using forced actions
2504 * and signalling of the wait object.
2505 *
2506 * @returns VBox status code, informational status codes may indicate failure.
2507 * @param pVM The cross context VM structure.
2508 * @param pVCpu The cross context virtual CPU structure.
2509 */
2510VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2511{
2512 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2513 pVM,
2514 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2515 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2516 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2517 pVCpu->em.s.fForceRAW));
2518 VM_ASSERT_EMT(pVM);
2519 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2520 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2521 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2522 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2523
2524 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2525 if (rc == 0)
2526 {
2527 /*
2528 * Start the virtual time.
2529 */
2530 TMR3NotifyResume(pVM, pVCpu);
2531
2532 /*
2533 * The Outer Main Loop.
2534 */
2535 bool fFFDone = false;
2536
2537 /* Reschedule right away to start in the right state. */
2538 rc = VINF_SUCCESS;
2539
2540 /* If resuming after a pause or a state load, restore the previous
2541 state or else we'll start executing code. Else, just reschedule. */
2542 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2543 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2544 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2545 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2546 else
2547 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2548 pVCpu->em.s.cIemThenRemInstructions = 0;
2549 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2550
2551 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2552 for (;;)
2553 {
2554 /*
2555 * Before we can schedule anything (we're here because
2556 * scheduling is required) we must service any pending
2557 * forced actions to avoid any pending action causing
2558 * immediate rescheduling upon entering an inner loop
2559 *
2560 * Do forced actions.
2561 */
2562 if ( !fFFDone
2563 && RT_SUCCESS(rc)
2564 && rc != VINF_EM_TERMINATE
2565 && rc != VINF_EM_OFF
2566 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2567 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2568 {
2569 rc = emR3ForcedActions(pVM, pVCpu, rc);
2570 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2571 if ( ( rc == VINF_EM_RESCHEDULE_REM
2572 || rc == VINF_EM_RESCHEDULE_HM)
2573 && pVCpu->em.s.fForceRAW)
2574 rc = VINF_EM_RESCHEDULE_RAW;
2575 }
2576 else if (fFFDone)
2577 fFFDone = false;
2578
2579 /*
2580 * Now what to do?
2581 */
2582 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2583 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2584 switch (rc)
2585 {
2586 /*
2587 * Keep doing what we're currently doing.
2588 */
2589 case VINF_SUCCESS:
2590 break;
2591
2592 /*
2593 * Reschedule - to raw-mode execution.
2594 */
2595/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2596 case VINF_EM_RESCHEDULE_RAW:
2597 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2598 if (VM_IS_RAW_MODE_ENABLED(pVM))
2599 {
2600 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2601 pVCpu->em.s.enmState = EMSTATE_RAW;
2602 }
2603 else
2604 {
2605 AssertLogRelFailed();
2606 pVCpu->em.s.enmState = EMSTATE_NONE;
2607 }
2608 break;
2609
2610 /*
2611 * Reschedule - to HM or NEM.
2612 */
2613 case VINF_EM_RESCHEDULE_HM:
2614 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2615 Assert(!pVCpu->em.s.fForceRAW);
2616 if (VM_IS_HM_ENABLED(pVM))
2617 {
2618 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2619 pVCpu->em.s.enmState = EMSTATE_HM;
2620 }
2621 else if (VM_IS_NEM_ENABLED(pVM))
2622 {
2623 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2624 pVCpu->em.s.enmState = EMSTATE_NEM;
2625 }
2626 else
2627 {
2628 AssertLogRelFailed();
2629 pVCpu->em.s.enmState = EMSTATE_NONE;
2630 }
2631 break;
2632
2633 /*
2634 * Reschedule - to recompiled execution.
2635 */
2636 case VINF_EM_RESCHEDULE_REM:
2637 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2638 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2639 {
2640 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2641 enmOldState, EMSTATE_IEM_THEN_REM));
2642 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2643 {
2644 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2645 pVCpu->em.s.cIemThenRemInstructions = 0;
2646 }
2647 }
2648 else
2649 {
2650 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2651 pVCpu->em.s.enmState = EMSTATE_REM;
2652 }
2653 break;
2654
2655 /*
2656 * Resume.
2657 */
2658 case VINF_EM_RESUME:
2659 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2660 /* Don't reschedule in the halted or wait for SIPI case. */
2661 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2662 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2663 {
2664 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2665 break;
2666 }
2667 /* fall through and get scheduled. */
2668 RT_FALL_THRU();
2669
2670 /*
2671 * Reschedule.
2672 */
2673 case VINF_EM_RESCHEDULE:
2674 {
2675 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2676 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2677 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2678 pVCpu->em.s.cIemThenRemInstructions = 0;
2679 pVCpu->em.s.enmState = enmState;
2680 break;
2681 }
2682
2683 /*
2684 * Halted.
2685 */
2686 case VINF_EM_HALT:
2687 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2688 pVCpu->em.s.enmState = EMSTATE_HALTED;
2689 break;
2690
2691 /*
2692 * Switch to the wait for SIPI state (application processor only)
2693 */
2694 case VINF_EM_WAIT_SIPI:
2695 Assert(pVCpu->idCpu != 0);
2696 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2697 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2698 break;
2699
2700
2701 /*
2702 * Suspend.
2703 */
2704 case VINF_EM_SUSPEND:
2705 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2706 Assert(enmOldState != EMSTATE_SUSPENDED);
2707 pVCpu->em.s.enmPrevState = enmOldState;
2708 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2709 break;
2710
2711 /*
2712 * Reset.
2713 * We might end up doing a double reset for now, we'll have to clean up the mess later.
2714 */
2715 case VINF_EM_RESET:
2716 {
2717 if (pVCpu->idCpu == 0)
2718 {
2719 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2720 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2721 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2722 pVCpu->em.s.cIemThenRemInstructions = 0;
2723 pVCpu->em.s.enmState = enmState;
2724 }
2725 else
2726 {
2727 /* All other VCPUs go into the wait for SIPI state. */
2728 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2729 }
2730 break;
2731 }
2732
2733 /*
2734 * Power Off.
2735 */
2736 case VINF_EM_OFF:
2737 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2738 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2739 TMR3NotifySuspend(pVM, pVCpu);
2740 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2741 return rc;
2742
2743 /*
2744 * Terminate the VM.
2745 */
2746 case VINF_EM_TERMINATE:
2747 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2748 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2749 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2750 TMR3NotifySuspend(pVM, pVCpu);
2751 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2752 return rc;
2753
2754
2755 /*
2756 * Out of memory, suspend the VM and stuff.
2757 */
2758 case VINF_EM_NO_MEMORY:
2759 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2760 Assert(enmOldState != EMSTATE_SUSPENDED);
2761 pVCpu->em.s.enmPrevState = enmOldState;
2762 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2763 TMR3NotifySuspend(pVM, pVCpu);
2764 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2765
2766 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2767 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2768 if (rc != VINF_EM_SUSPEND)
2769 {
2770 if (RT_SUCCESS_NP(rc))
2771 {
2772 AssertLogRelMsgFailed(("%Rrc\n", rc));
2773 rc = VERR_EM_INTERNAL_ERROR;
2774 }
2775 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2776 }
2777 return rc;
2778
2779 /*
2780 * Guest debug events.
2781 */
2782 case VINF_EM_DBG_STEPPED:
2783 case VINF_EM_DBG_STOP:
2784 case VINF_EM_DBG_EVENT:
2785 case VINF_EM_DBG_BREAKPOINT:
2786 case VINF_EM_DBG_STEP:
2787 if (enmOldState == EMSTATE_RAW)
2788 {
2789 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2790 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2791 }
2792 else if (enmOldState == EMSTATE_HM)
2793 {
2794 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2795 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2796 }
2797 else if (enmOldState == EMSTATE_NEM)
2798 {
2799 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2800 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2801 }
2802 else if (enmOldState == EMSTATE_REM)
2803 {
2804 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2805 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2806 }
2807 else
2808 {
2809 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2810 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2811 }
2812 break;
2813
2814 /*
2815 * Hypervisor debug events.
2816 */
2817 case VINF_EM_DBG_HYPER_STEPPED:
2818 case VINF_EM_DBG_HYPER_BREAKPOINT:
2819 case VINF_EM_DBG_HYPER_ASSERTION:
2820 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2821 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2822 break;
2823
2824 /*
2825 * Triple fault.
2826 */
2827 case VINF_EM_TRIPLE_FAULT:
2828 if (!pVM->em.s.fGuruOnTripleFault)
2829 {
2830 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2831 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2832 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2833 continue;
2834 }
2835 /* Else fall through and trigger a guru. */
2836 RT_FALL_THRU();
2837
2838 case VERR_VMM_RING0_ASSERTION:
2839 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2840 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2841 break;
2842
2843 /*
2844 * Any error code showing up here other than the ones we
2845                     * know and process above is considered to be FATAL.
2846 *
2847 * Unknown warnings and informational status codes are also
2848 * included in this.
2849 */
2850 default:
2851 if (RT_SUCCESS_NP(rc))
2852 {
2853 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2854 rc = VERR_EM_INTERNAL_ERROR;
2855 }
2856 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2857 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2858 break;
2859 }
2860
2861 /*
2862 * Act on state transition.
2863 */
2864 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2865 if (enmOldState != enmNewState)
2866 {
2867 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2868
2869 /* Clear MWait flags and the unhalt FF. */
2870 if ( enmOldState == EMSTATE_HALTED
2871 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2872 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2873 && ( enmNewState == EMSTATE_RAW
2874 || enmNewState == EMSTATE_HM
2875 || enmNewState == EMSTATE_NEM
2876 || enmNewState == EMSTATE_REM
2877 || enmNewState == EMSTATE_IEM_THEN_REM
2878 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2879 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2880 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2881 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2882 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2883 {
2884 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2885 {
2886 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2887 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2888 }
2889 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2890 {
2891 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2892 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2893 }
2894 }
2895 }
2896 else
2897 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2898
2899 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2900 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2901
2902 /*
2903 * Act on the new state.
2904 */
2905 switch (enmNewState)
2906 {
2907 /*
2908 * Execute raw.
2909 */
2910 case EMSTATE_RAW:
2911#ifdef VBOX_WITH_RAW_MODE
2912 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2913#else
2914 AssertLogRelMsgFailed(("%Rrc\n", rc));
2915 rc = VERR_EM_INTERNAL_ERROR;
2916#endif
2917 break;
2918
2919 /*
2920 * Execute hardware accelerated raw.
2921 */
2922 case EMSTATE_HM:
2923 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2924 break;
2925
2926 /*
2927                 * Execute using the native execution manager (NEM).
2928 */
2929 case EMSTATE_NEM:
2930 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2931 break;
2932
2933 /*
2934 * Execute recompiled.
2935 */
2936 case EMSTATE_REM:
2937 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2938 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2939 break;
2940
2941 /*
2942 * Execute in the interpreter.
2943 */
2944 case EMSTATE_IEM:
2945 {
2946 uint32_t cInstructions = 0;
2947#if 0 /* For testing purposes. */
2948 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2949 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2950 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2951 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2952 rc = VINF_SUCCESS;
2953 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2954#endif
2955 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, 4096 /*cMaxInstructions*/, 2047 /*cPollRate*/, &cInstructions));
2956 if (pVM->em.s.fIemExecutesAll)
2957 {
2958 Assert(rc != VINF_EM_RESCHEDULE_REM);
2959 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2960 Assert(rc != VINF_EM_RESCHEDULE_HM);
2961#ifdef VBOX_HIGH_RES_TIMERS_HACK
2962 if (cInstructions < 2048)
2963 TMTimerPollVoid(pVM, pVCpu);
2964#endif
2965 }
2966 fFFDone = false;
2967 break;
2968 }
2969
2970 /*
2971                 * Execute in IEM, hoping we can quickly switch back to HM
2972 * or RAW execution. If our hopes fail, we go to REM.
2973 */
2974 case EMSTATE_IEM_THEN_REM:
2975 {
2976 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2977 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2978 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2979 break;
2980 }
2981
2982 /*
2983 * Application processor execution halted until SIPI.
2984 */
2985 case EMSTATE_WAIT_SIPI:
2986 /* no break */
2987 /*
2988 * hlt - execution halted until interrupt.
2989 */
2990 case EMSTATE_HALTED:
2991 {
2992 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2993                    /* If HM (or someone else) stores a pending interrupt in
2994                       TRPM, it must be dispatched ASAP without any halting.
2995                       Anything pending in TRPM has been accepted and the CPU
2996                       should already be in the right state to receive it. */
2997 if (TRPMHasTrap(pVCpu))
2998 rc = VINF_EM_RESCHEDULE;
2999 /* MWAIT has a special extension where it's woken up when
3000 an interrupt is pending even when IF=0. */
3001 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
3002 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
3003 {
3004 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
3005 if (rc == VINF_SUCCESS)
3006 {
3007 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3008 APICUpdatePendingInterrupts(pVCpu);
3009
3010 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
3011 | VMCPU_FF_INTERRUPT_NESTED_GUEST
3012 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
3013 {
3014 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
3015 rc = VINF_EM_RESCHEDULE;
3016 }
3017 }
3018 }
3019 else
3020 {
3021 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
3022 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
3023 check VMCPU_FF_UPDATE_APIC here. */
3024 if ( rc == VINF_SUCCESS
3025 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
3026 {
3027 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
3028 rc = VINF_EM_RESCHEDULE;
3029 }
3030 }
3031
3032 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
3033 break;
3034 }
3035
3036 /*
3037 * Suspended - return to VM.cpp.
3038 */
3039 case EMSTATE_SUSPENDED:
3040 TMR3NotifySuspend(pVM, pVCpu);
3041 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3042 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3043 return VINF_EM_SUSPEND;
3044
3045 /*
3046 * Debugging in the guest.
3047 */
3048 case EMSTATE_DEBUG_GUEST_RAW:
3049 case EMSTATE_DEBUG_GUEST_HM:
3050 case EMSTATE_DEBUG_GUEST_NEM:
3051 case EMSTATE_DEBUG_GUEST_IEM:
3052 case EMSTATE_DEBUG_GUEST_REM:
3053 TMR3NotifySuspend(pVM, pVCpu);
3054 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
3055 TMR3NotifyResume(pVM, pVCpu);
3056 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
3057 break;
3058
3059 /*
3060 * Debugging in the hypervisor.
3061 */
3062 case EMSTATE_DEBUG_HYPER:
3063 {
3064 TMR3NotifySuspend(pVM, pVCpu);
3065 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3066
3067 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
3068 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
3069 if (rc != VINF_SUCCESS)
3070 {
3071 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
3072 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
3073 else
3074 {
3075 /* switch to guru meditation mode */
3076 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3077 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3078 VMMR3FatalDump(pVM, pVCpu, rc);
3079 }
3080 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3081 return rc;
3082 }
3083
3084 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
3085 TMR3NotifyResume(pVM, pVCpu);
3086 break;
3087 }
3088
3089 /*
3090 * Guru meditation takes place in the debugger.
3091 */
3092 case EMSTATE_GURU_MEDITATION:
3093 {
3094 TMR3NotifySuspend(pVM, pVCpu);
3095 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3096 VMMR3FatalDump(pVM, pVCpu, rc);
3097 emR3Debug(pVM, pVCpu, rc);
3098 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3099 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3100 return rc;
3101 }
3102
3103 /*
3104 * The states we don't expect here.
3105 */
3106 case EMSTATE_NONE:
3107 case EMSTATE_TERMINATING:
3108 default:
3109 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
3110 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3111 TMR3NotifySuspend(pVM, pVCpu);
3112 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3113 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3114 return VERR_EM_INTERNAL_ERROR;
3115 }
3116 } /* The Outer Main Loop */
3117 }
3118 else
3119 {
3120 /*
3121 * Fatal error.
3122 */
3123 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
3124 TMR3NotifySuspend(pVM, pVCpu);
3125 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3126 VMMR3FatalDump(pVM, pVCpu, rc);
3127 emR3Debug(pVM, pVCpu, rc);
3128 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3129 /** @todo change the VM state! */
3130 return rc;
3131 }
3132
3133 /* not reached */
3134}
3135
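/*
 * Purely illustrative sketch of how a caller on the EMT might drive
 * EMR3ExecuteVM; the helper names are made up and the real emulation thread
 * handles many more states and requests than shown here.
 */
#if 0
static int emR3ExampleDriveVM(PVM pVM, PVMCPU pVCpu) /* hypothetical helper */
{
    int rc;
    do
    {
        rc = EMR3ExecuteVM(pVM, pVCpu);
        /* VINF_EM_SUSPEND: state already saved; a real caller would block the
           EMT here until the VM is resumed and then re-enter the loop. */
    } while (   rc == VINF_EM_SUSPEND
             && emR3ExampleWaitForResume(pVM, pVCpu) /* hypothetical */);
    return rc; /* power off, termination or a fatal status code */
}
#endif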
3136/**
3137 * Notify EM that the VM is being suspended (used by FTM).
3138 *
3139 * @param pVM The cross context VM structure.
3140 */
3141VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
3142{
3143 PVMCPU pVCpu = VMMGetCpu(pVM);
3144
3145 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
3146 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
3147 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
3148 return VINF_SUCCESS;
3149}
3150
3151/**
3152 * Notify EM that the VM is being resumed (used by FTM).
3153 *
3154 * @param pVM The cross context VM structure.
3155 */
3156VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
3157{
3158 PVMCPU pVCpu = VMMGetCpu(pVM);
3159 EMSTATE enmCurState = pVCpu->em.s.enmState;
3160
3161 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
3162 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
3163 pVCpu->em.s.enmPrevState = enmCurState;
3164 return VINF_SUCCESS;
3165}