VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@75681

Last change on this file: r75681, checked in by vboxsync, 6 years ago

VMM: Nested SVM: bugref:7243 VMCPU_FF_INTERRUPT_NESTED_GUEST is similar to VMCPU_FF_INTERRUPT_APIC. It belongs in the high-priority mask.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 122.5 KB
1/* $Id: EM.cpp 75681 2018-11-23 07:32:44Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
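/*
 * Rough shape of the outer loop described above (informal sketch only; the real
 * EMR3ExecuteVM() handles many more states and status codes, and emR3XxxExecute
 * stands for the mode-specific inner loops named above):
 *
 *      for (;;)
 *      {
 *          enmState = emR3Reschedule(pVM, pVCpu);           // pick RAW / HM / NEM / IEM / REM
 *          rc       = emR3XxxExecute(pVM, pVCpu, &fFFDone); // inner loop for that mode
 *          if (!fFFDone)
 *              rc = emR3ForcedActions(pVM, pVCpu, rc);      // service pending forced actions
 *          // ...map rc to a state change, halt, suspend or VM exit...
 *      }
 */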
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/vmm.h>
42#include <VBox/vmm/patm.h>
43#include <VBox/vmm/csam.h>
44#include <VBox/vmm/selm.h>
45#include <VBox/vmm/trpm.h>
46#include <VBox/vmm/iem.h>
47#include <VBox/vmm/nem.h>
48#include <VBox/vmm/iom.h>
49#include <VBox/vmm/dbgf.h>
50#include <VBox/vmm/pgm.h>
51#ifdef VBOX_WITH_REM
52# include <VBox/vmm/rem.h>
53#endif
54#include <VBox/vmm/apic.h>
55#include <VBox/vmm/tm.h>
56#include <VBox/vmm/mm.h>
57#include <VBox/vmm/ssm.h>
58#include <VBox/vmm/pdmapi.h>
59#include <VBox/vmm/pdmcritsect.h>
60#include <VBox/vmm/pdmqueue.h>
61#include <VBox/vmm/hm.h>
62#include <VBox/vmm/patm.h>
63#include "EMInternal.h"
64#include <VBox/vmm/vm.h>
65#include <VBox/vmm/uvm.h>
66#include <VBox/vmm/cpumdis.h>
67#include <VBox/dis.h>
68#include <VBox/disopcode.h>
69#include "VMMTracing.h"
70
71#include <iprt/asm.h>
72#include <iprt/string.h>
73#include <iprt/stream.h>
74#include <iprt/thread.h>
75
76
77/*********************************************************************************************************************************
78* Internal Functions *
79*********************************************************************************************************************************/
80static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
81static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
82#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
83static const char *emR3GetStateName(EMSTATE enmState);
84#endif
85static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
86#if defined(VBOX_WITH_REM) || defined(DEBUG)
87static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
88#endif
89static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
90
91
92/**
93 * Initializes the EM.
94 *
95 * @returns VBox status code.
96 * @param pVM The cross context VM structure.
97 */
98VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
99{
100 LogFlow(("EMR3Init\n"));
101 /*
102 * Assert alignment and sizes.
103 */
104 AssertCompileMemberAlignment(VM, em.s, 32);
105 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
106 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
107
108 /*
109 * Init the structure.
110 */
111 pVM->em.s.offVM = RT_UOFFSETOF(VM, em.s);
112 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
113 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
114
115 bool fEnabled;
116 int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
117 AssertLogRelRCReturn(rc, rc);
118 pVM->fRecompileUser = !fEnabled;
119
120 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
121 AssertLogRelRCReturn(rc, rc);
122 pVM->fRecompileSupervisor = !fEnabled;
123
124#ifdef VBOX_WITH_RAW_RING1
125 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
126 AssertLogRelRCReturn(rc, rc);
127#else
128 pVM->fRawRing1Enabled = false; /* Disabled by default. */
129#endif
130
131 rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
132 AssertLogRelRCReturn(rc, rc);
133
134 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
135 AssertLogRelRCReturn(rc, rc);
136 pVM->em.s.fGuruOnTripleFault = !fEnabled;
137 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
138 {
139 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
140 pVM->em.s.fGuruOnTripleFault = true;
141 }
142
143 LogRel(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
144 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
145
146 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
147 * Whether to try to correlate exit history in any context, detect hot spots and
148 * try to optimize these using IEM if there are other exits close by. This
149 * overrides the context specific settings. */
150 bool fExitOptimizationEnabled = true;
151 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
152 AssertLogRelRCReturn(rc, rc);
153
154 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
155 * Whether to optimize exits in ring-0. Setting this to false will also disable
156 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
157 * capabilities of the host kernel, this optimization may be unavailable. */
158 bool fExitOptimizationEnabledR0 = true;
159 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
160 AssertLogRelRCReturn(rc, rc);
161 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
162
163 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
164 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
165 * hooks are in effect). */
166 /** @todo change the default to true here */
167 bool fExitOptimizationEnabledR0PreemptDisabled = true;
168 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
169 AssertLogRelRCReturn(rc, rc);
170 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
171
172 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
173 * Maximum number of instructions to let EMHistoryExec execute in one go. */
174 uint16_t cHistoryExecMaxInstructions = 8192;
175 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
176 AssertLogRelRCReturn(rc, rc);
177 if (cHistoryExecMaxInstructions < 16)
178 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
179
180 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
181 * Maximum number of instructions between exits during probing. */
182 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
183#ifdef RT_OS_WINDOWS
184 if (VM_IS_NEM_ENABLED(pVM))
185 cHistoryProbeMaxInstructionsWithoutExit = 32;
186#endif
187 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
188 cHistoryProbeMaxInstructionsWithoutExit);
189 AssertLogRelRCReturn(rc, rc);
190 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
191 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
192 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 16");
193
194 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
195 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
196 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
197 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
198 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
199 cHistoryProbeMinInstructions);
200 AssertLogRelRCReturn(rc, rc);
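/*
 * Illustrative note: these EM keys live under the VM's CFGM tree and can
 * typically be set from the host before the VM starts via extradata, e.g.:
 *      VBoxManage setextradata "MyVM" "VBoxInternal/EM/IemExecutesAll" 1
 *      VBoxManage setextradata "MyVM" "VBoxInternal/EM/ExitOptimizationEnabled" 0
 * ("MyVM" and the values are placeholders; the key paths follow the @cfgm docs above.)
 */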
201
202 for (VMCPUID i = 0; i < pVM->cCpus; i++)
203 {
204 pVM->aCpus[i].em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
205 pVM->aCpus[i].em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
206 pVM->aCpus[i].em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
207
208 pVM->aCpus[i].em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
209 pVM->aCpus[i].em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
210 pVM->aCpus[i].em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
211 }
212
213#ifdef VBOX_WITH_REM
214 /*
215 * Initialize the REM critical section.
216 */
217 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
218 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
219 AssertRCReturn(rc, rc);
220#endif
221
222 /*
223 * Saved state.
224 */
225 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
226 NULL, NULL, NULL,
227 NULL, emR3Save, NULL,
228 NULL, emR3Load, NULL);
229 if (RT_FAILURE(rc))
230 return rc;
231
232 for (VMCPUID i = 0; i < pVM->cCpus; i++)
233 {
234 PVMCPU pVCpu = &pVM->aCpus[i];
235
236 pVCpu->em.s.enmState = i == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
237 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
238 pVCpu->em.s.fForceRAW = false;
239 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
240 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
241
242#ifdef VBOX_WITH_RAW_MODE
243 if (VM_IS_RAW_MODE_ENABLED(pVM))
244 {
245 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
246 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
247 }
248#endif
249
250# define EM_REG_COUNTER(a, b, c) \
251 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
252 AssertRC(rc);
253
254# define EM_REG_COUNTER_USED(a, b, c) \
255 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
256 AssertRC(rc);
257
258# define EM_REG_PROFILE(a, b, c) \
259 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
260 AssertRC(rc);
261
262# define EM_REG_PROFILE_ADV(a, b, c) \
263 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
264 AssertRC(rc);
265
266 /*
267 * Statistics.
268 */
269#ifdef VBOX_WITH_STATISTICS
270 PEMSTATS pStats;
271 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
272 if (RT_FAILURE(rc))
273 return rc;
274
275 pVCpu->em.s.pStatsR3 = pStats;
276 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
277 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
278
279# if 1 /* rawmode only? */
280 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
281 EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions end to IEM in ring-3.");
282 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
283 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sli instructions.");
284 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
285 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
286 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
287 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
288 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
289 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
290 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
291 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
292 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
293 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
294 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
295 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
296 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
297 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
298 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
299 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
300 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
301 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
302 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
303 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
304 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
305 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
306 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
307#endif
308 pVCpu->em.s.pCliStatTree = 0;
309
310 /* these should be considered for release statistics. */
311 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
312 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
313 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%d/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
314 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%d/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
315 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%d/EM/HMExecuteCalled", "Number of times enmR3HMExecute is called.");
316 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
317 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
318 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%d/EM/NEMEnter", "Profiling NEM entry overhead.");
319#endif /* VBOX_WITH_STATISTICS */
320 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%d/EM/NEMExec", "Profiling NEM execution.");
321 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%d/EM/NEMExecuteCalled", "Number of times enmR3NEMExecute is called.");
322#ifdef VBOX_WITH_STATISTICS
323 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
324 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
325 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
326 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
327 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
328 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
329#endif /* VBOX_WITH_STATISTICS */
330
331 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
332 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
333 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
334 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
335 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
336
337 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
338
339 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
340 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", i);
341 AssertRC(rc);
342
343 /* History record statistics */
344 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
345 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", i);
346 AssertRC(rc);
347
348 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
349 {
350 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
351 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", i, iStep);
352 AssertRC(rc);
353 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
354 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", i, iStep);
355 AssertRC(rc);
356 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
357 "Number of replacments at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", i, iStep);
358 AssertRC(rc);
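/** @todo The "Replacements" statistic above is registered against
 *        aStatHistoryRecTypeChanged[iStep] again; it presumably should use a
 *        dedicated 'replaced' counter array (looks like a copy & paste slip). */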
359 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
360 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", i, iStep);
361 AssertRC(rc);
362 }
363
364 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%d/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
365 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%d/ExitOpt/ExecSavedExit", "Net number of saved exits.");
366 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%d/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
367 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%d/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
368 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%d/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
369 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%d/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
370 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%d/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
371 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%d/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
372 }
373
374 emR3InitDbg(pVM);
375 return VINF_SUCCESS;
376}
377
378
379/**
380 * Called when a VM initialization stage is completed.
381 *
382 * @returns VBox status code.
383 * @param pVM The cross context VM structure.
384 * @param enmWhat The initialization state that was completed.
385 */
386VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
387{
388 if (enmWhat == VMINITCOMPLETED_RING0)
389 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
390 pVM->aCpus[0].em.s.fExitOptimizationEnabled, pVM->aCpus[0].em.s.fExitOptimizationEnabledR0,
391 pVM->aCpus[0].em.s.fExitOptimizationEnabledR0PreemptDisabled));
392 return VINF_SUCCESS;
393}
394
395
396/**
397 * Applies relocations to data and code managed by this
398 * component. This function will be called at init and
399 * whenever the VMM needs to relocate itself inside the GC.
400 *
401 * @param pVM The cross context VM structure.
402 */
403VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
404{
405 LogFlow(("EMR3Relocate\n"));
406 for (VMCPUID i = 0; i < pVM->cCpus; i++)
407 {
408 PVMCPU pVCpu = &pVM->aCpus[i];
409 if (pVCpu->em.s.pStatsR3)
410 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
411 }
412}
413
414
415/**
416 * Reset the EM state for a CPU.
417 *
418 * Called by EMR3Reset and hot plugging.
419 *
420 * @param pVCpu The cross context virtual CPU structure.
421 */
422VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
423{
424 /* Reset scheduling state. */
425 pVCpu->em.s.fForceRAW = false;
426 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
427
428 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
429 out of the HALTED state here so that enmPrevState doesn't end up as
430 HALTED when EMR3Execute returns. */
431 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
432 {
433 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
434 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
435 }
436}
437
438
439/**
440 * Reset notification.
441 *
442 * @param pVM The cross context VM structure.
443 */
444VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
445{
446 Log(("EMR3Reset: \n"));
447 for (VMCPUID i = 0; i < pVM->cCpus; i++)
448 EMR3ResetCpu(&pVM->aCpus[i]);
449}
450
451
452/**
453 * Terminates the EM.
454 *
455 * Termination means cleaning up and freeing all resources;
456 * the VM itself is at this point powered off or suspended.
457 *
458 * @returns VBox status code.
459 * @param pVM The cross context VM structure.
460 */
461VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
462{
463 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
464
465#ifdef VBOX_WITH_REM
466 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
467#else
468 RT_NOREF(pVM);
469#endif
470 return VINF_SUCCESS;
471}
472
473
474/**
475 * Execute state save operation.
476 *
477 * @returns VBox status code.
478 * @param pVM The cross context VM structure.
479 * @param pSSM SSM operation handle.
480 */
481static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
482{
483 for (VMCPUID i = 0; i < pVM->cCpus; i++)
484 {
485 PVMCPU pVCpu = &pVM->aCpus[i];
486
487 SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
488
489 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
490 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
491 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
492
493 /* Save mwait state. */
494 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
495 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
496 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
497 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
498 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
499 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
500 AssertRCReturn(rc, rc);
501 }
502 return VINF_SUCCESS;
503}
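/*
 * For reference, the per-VCPU record written above is, in order:
 *      bool      fForceRAW
 *      uint32_t  enmPrevState
 *      uint32_t  MWait.fWait
 *      RTGCPTR   MWait.uMWaitRAX, uMWaitRCX, uMonitorRAX, uMonitorRCX, uMonitorRDX
 * emR3Load() below consumes the fields in the same order, gated on the saved
 * state version.
 */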
504
505
506/**
507 * Execute state load operation.
508 *
509 * @returns VBox status code.
510 * @param pVM The cross context VM structure.
511 * @param pSSM SSM operation handle.
512 * @param uVersion Data layout version.
513 * @param uPass The data pass.
514 */
515static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
516{
517 /*
518 * Validate version.
519 */
520 if ( uVersion > EM_SAVED_STATE_VERSION
521 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
522 {
523 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
524 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
525 }
526 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
527
528 /*
529 * Load the saved state.
530 */
531 for (VMCPUID i = 0; i < pVM->cCpus; i++)
532 {
533 PVMCPU pVCpu = &pVM->aCpus[i];
534
535 int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
536 if (RT_FAILURE(rc))
537 pVCpu->em.s.fForceRAW = false;
538 AssertRCReturn(rc, rc);
539
540 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
541 {
542 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
543 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
544 AssertRCReturn(rc, rc);
545 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
546
547 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
548 }
549 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
550 {
551 /* Load mwait state. */
552 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
553 AssertRCReturn(rc, rc);
554 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
555 AssertRCReturn(rc, rc);
556 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
557 AssertRCReturn(rc, rc);
558 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
559 AssertRCReturn(rc, rc);
560 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
561 AssertRCReturn(rc, rc);
562 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
563 AssertRCReturn(rc, rc);
564 }
565
566 Assert(!pVCpu->em.s.pCliStatTree);
567 }
568 return VINF_SUCCESS;
569}
570
571
572/**
573 * Argument packet for emR3SetExecutionPolicy.
574 */
575struct EMR3SETEXECPOLICYARGS
576{
577 EMEXECPOLICY enmPolicy;
578 bool fEnforce;
579};
580
581
582/**
583 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
584 */
585static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
586{
587 /*
588 * Only the first CPU changes the variables.
589 */
590 if (pVCpu->idCpu == 0)
591 {
592 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
593 switch (pArgs->enmPolicy)
594 {
595 case EMEXECPOLICY_RECOMPILE_RING0:
596 pVM->fRecompileSupervisor = pArgs->fEnforce;
597 break;
598 case EMEXECPOLICY_RECOMPILE_RING3:
599 pVM->fRecompileUser = pArgs->fEnforce;
600 break;
601 case EMEXECPOLICY_IEM_ALL:
602 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
603 break;
604 default:
605 AssertFailedReturn(VERR_INVALID_PARAMETER);
606 }
607 LogRel(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
608 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
609 }
610
611 /*
612 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
613 */
614 return pVCpu->em.s.enmState == EMSTATE_RAW
615 || pVCpu->em.s.enmState == EMSTATE_HM
616 || pVCpu->em.s.enmState == EMSTATE_NEM
617 || pVCpu->em.s.enmState == EMSTATE_IEM
618 || pVCpu->em.s.enmState == EMSTATE_REM
619 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
620 ? VINF_EM_RESCHEDULE
621 : VINF_SUCCESS;
622}
623
624
625/**
626 * Changes an execution scheduling policy parameter.
627 *
628 * This is used to enable or disable raw-mode / hardware-virtualization
629 * execution of user and supervisor code.
630 *
631 * @returns VINF_SUCCESS on success.
632 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
633 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
634 *
635 * @param pUVM The user mode VM handle.
636 * @param enmPolicy The scheduling policy to change.
637 * @param fEnforce Whether to enforce the policy or not.
638 */
639VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
640{
641 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
642 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
643 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
644
645 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
646 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
647}
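/*
 * Usage sketch (illustrative, not taken from the sources): force all guest code
 * through IEM and later restore the default scheduling.
 *
 *      int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *      AssertRC(rc);
 *      // ...run the workload to be inspected...
 *      rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, false);
 */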
648
649
650/**
651 * Queries an execution scheduling policy parameter.
652 *
653 * @returns VBox status code
654 * @param pUVM The user mode VM handle.
655 * @param enmPolicy The scheduling policy to query.
656 * @param pfEnforced Where to return the current value.
657 */
658VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
659{
660 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
661 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
662 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
663 PVM pVM = pUVM->pVM;
664 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
665
666 /* No need to bother EMTs with a query. */
667 switch (enmPolicy)
668 {
669 case EMEXECPOLICY_RECOMPILE_RING0:
670 *pfEnforced = pVM->fRecompileSupervisor;
671 break;
672 case EMEXECPOLICY_RECOMPILE_RING3:
673 *pfEnforced = pVM->fRecompileUser;
674 break;
675 case EMEXECPOLICY_IEM_ALL:
676 *pfEnforced = pVM->em.s.fIemExecutesAll;
677 break;
678 default:
679 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
680 }
681
682 return VINF_SUCCESS;
683}
684
685
686/**
687 * Queries the main execution engine of the VM.
688 *
689 * @returns VBox status code
690 * @param pUVM The user mode VM handle.
691 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
692 */
693VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
694{
695 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
696 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
697
698 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
699 PVM pVM = pUVM->pVM;
700 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
701
702 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
703 return VINF_SUCCESS;
704}
705
706
707/**
708 * Raise a fatal error.
709 *
710 * Safely terminate the VM with full state report and stuff. This function
711 * will naturally never return.
712 *
713 * @param pVCpu The cross context virtual CPU structure.
714 * @param rc VBox status code.
715 */
716VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
717{
718 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
719 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
720}
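/*
 * Orientation note (editorial): the longjmp() above unwinds to a matching
 * setjmp() on the FatalLongJump buffer - the one whose size is checked in
 * EMR3Init() - which is presumably armed in EMR3ExecuteVM() before entering
 * the execution loop, where the guru meditation is then reported.
 */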
721
722
723#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
724/**
725 * Gets the EM state name.
726 *
727 * @returns Pointer to a read-only state name.
728 * @param enmState The state.
729 */
730static const char *emR3GetStateName(EMSTATE enmState)
731{
732 switch (enmState)
733 {
734 case EMSTATE_NONE: return "EMSTATE_NONE";
735 case EMSTATE_RAW: return "EMSTATE_RAW";
736 case EMSTATE_HM: return "EMSTATE_HM";
737 case EMSTATE_IEM: return "EMSTATE_IEM";
738 case EMSTATE_REM: return "EMSTATE_REM";
739 case EMSTATE_HALTED: return "EMSTATE_HALTED";
740 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
741 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
742 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
743 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
744 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
745 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
746 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
747 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
748 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
749 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
750 case EMSTATE_NEM: return "EMSTATE_NEM";
751 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
752 default: return "Unknown!";
753 }
754}
755#endif /* LOG_ENABLED || VBOX_STRICT */
756
757
758/**
759 * Handle pending ring-3 I/O port write.
760 *
761 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
762 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
763 *
764 * @returns Strict VBox status code.
765 * @param pVM The cross context VM structure.
766 * @param pVCpu The cross context virtual CPU structure.
767 */
768VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
769{
770 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
771
772 /* Get and clear the pending data. */
773 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
774 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
775 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
776 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
777 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
778
779 /* Assert sanity. */
780 switch (cbValue)
781 {
782 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
783 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
784 case 4: break;
785 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
786 }
787 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
788
789 /* Do the work.*/
790 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
791 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
792 if (IOM_SUCCESS(rcStrict))
793 {
794 pVCpu->cpum.GstCtx.rip += cbInstr;
795 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
796 }
797 return rcStrict;
798}
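/*
 * Flow sketch (illustrative): a ring-0/raw-mode handler that cannot complete an
 * OUT on the spot records the port, value, access size and instruction length
 * via EMRZSetPendingIoPortWrite() and returns VINF_EM_PENDING_R3_IOPORT_WRITE;
 * the ring-3 loop then lands here, replays the write through IOMIOPortWrite()
 * and, on success, advances RIP past the instruction and clears RF.
 */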
799
800
801/**
802 * Handle pending ring-3 I/O port read.
803 *
804 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
805 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
806 *
807 * @returns Strict VBox status code.
808 * @param pVM The cross context VM structure.
809 * @param pVCpu The cross context virtual CPU structure.
810 */
811VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
812{
813 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
814
815 /* Get and clear the pending data. */
816 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
817 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
818 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
819 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
820
821 /* Assert sanity. */
822 switch (cbValue)
823 {
824 case 1: break;
825 case 2: break;
826 case 4: break;
827 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
828 }
829 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
830 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
831
832 /* Do the work.*/
833 uint32_t uValue = 0;
834 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
835 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
836 if (IOM_SUCCESS(rcStrict))
837 {
838 if (cbValue == 4)
839 pVCpu->cpum.GstCtx.rax = uValue;
840 else if (cbValue == 2)
841 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
842 else
843 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
844 pVCpu->cpum.GstCtx.rip += cbInstr;
845 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
846 }
847 return rcStrict;
848}
849
850
851/**
852 * Debug loop.
853 *
854 * @returns VBox status code for EM.
855 * @param pVM The cross context VM structure.
856 * @param pVCpu The cross context virtual CPU structure.
857 * @param rc Current EM VBox status code.
858 */
859static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
860{
861 for (;;)
862 {
863 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
864 const VBOXSTRICTRC rcLast = rc;
865
866 /*
867 * Debug related RC.
868 */
869 switch (VBOXSTRICTRC_VAL(rc))
870 {
871 /*
872 * Single step an instruction.
873 */
874 case VINF_EM_DBG_STEP:
875 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
876 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
877 || pVCpu->em.s.fForceRAW /* paranoia */)
878#ifdef VBOX_WITH_RAW_MODE
879 rc = emR3RawStep(pVM, pVCpu);
880#else
881 AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
882#endif
883 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
884 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
885 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
886 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
887#ifdef VBOX_WITH_REM
888 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
889 rc = emR3RemStep(pVM, pVCpu);
890#endif
891 else
892 {
893 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
894 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
895 rc = VINF_EM_DBG_STEPPED;
896 }
897 break;
898
899 /*
900 * Simple events: stepped, breakpoint, stop/assertion.
901 */
902 case VINF_EM_DBG_STEPPED:
903 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
904 break;
905
906 case VINF_EM_DBG_BREAKPOINT:
907 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
908 break;
909
910 case VINF_EM_DBG_STOP:
911 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
912 break;
913
914 case VINF_EM_DBG_EVENT:
915 rc = DBGFR3EventHandlePending(pVM, pVCpu);
916 break;
917
918 case VINF_EM_DBG_HYPER_STEPPED:
919 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
920 break;
921
922 case VINF_EM_DBG_HYPER_BREAKPOINT:
923 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
924 break;
925
926 case VINF_EM_DBG_HYPER_ASSERTION:
927 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
928 RTLogFlush(NULL);
929 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
930 break;
931
932 /*
933 * Guru meditation.
934 */
935 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
936 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
937 break;
938 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
939 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
940 break;
941 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
942 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
943 break;
944
945 default: /** @todo don't use default for guru, but make special errors code! */
946 {
947 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
948 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
949 break;
950 }
951 }
952
953 /*
954 * Process the result.
955 */
956 switch (VBOXSTRICTRC_VAL(rc))
957 {
958 /*
959 * Continue the debugging loop.
960 */
961 case VINF_EM_DBG_STEP:
962 case VINF_EM_DBG_STOP:
963 case VINF_EM_DBG_EVENT:
964 case VINF_EM_DBG_STEPPED:
965 case VINF_EM_DBG_BREAKPOINT:
966 case VINF_EM_DBG_HYPER_STEPPED:
967 case VINF_EM_DBG_HYPER_BREAKPOINT:
968 case VINF_EM_DBG_HYPER_ASSERTION:
969 break;
970
971 /*
972 * Resuming execution (in some form) has to be done here if we got
973 * a hypervisor debug event.
974 */
975 case VINF_SUCCESS:
976 case VINF_EM_RESUME:
977 case VINF_EM_SUSPEND:
978 case VINF_EM_RESCHEDULE:
979 case VINF_EM_RESCHEDULE_RAW:
980 case VINF_EM_RESCHEDULE_REM:
981 case VINF_EM_HALT:
982 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
983 {
984#ifdef VBOX_WITH_RAW_MODE
985 rc = emR3RawResumeHyper(pVM, pVCpu);
986 if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
987 continue;
988#else
989 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
990#endif
991 }
992 if (rc == VINF_SUCCESS)
993 rc = VINF_EM_RESCHEDULE;
994 return rc;
995
996 /*
997 * The debugger isn't attached.
998 * We'll simply turn the thing off since that's the easiest thing to do.
999 */
1000 case VERR_DBGF_NOT_ATTACHED:
1001 switch (VBOXSTRICTRC_VAL(rcLast))
1002 {
1003 case VINF_EM_DBG_HYPER_STEPPED:
1004 case VINF_EM_DBG_HYPER_BREAKPOINT:
1005 case VINF_EM_DBG_HYPER_ASSERTION:
1006 case VERR_TRPM_PANIC:
1007 case VERR_TRPM_DONT_PANIC:
1008 case VERR_VMM_RING0_ASSERTION:
1009 case VERR_VMM_HYPER_CR3_MISMATCH:
1010 case VERR_VMM_RING3_CALL_DISABLED:
1011 return rcLast;
1012 }
1013 return VINF_EM_OFF;
1014
1015 /*
1016 * Status codes terminating the VM in one or another sense.
1017 */
1018 case VINF_EM_TERMINATE:
1019 case VINF_EM_OFF:
1020 case VINF_EM_RESET:
1021 case VINF_EM_NO_MEMORY:
1022 case VINF_EM_RAW_STALE_SELECTOR:
1023 case VINF_EM_RAW_IRET_TRAP:
1024 case VERR_TRPM_PANIC:
1025 case VERR_TRPM_DONT_PANIC:
1026 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
1027 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
1028 case VERR_VMM_RING0_ASSERTION:
1029 case VERR_VMM_HYPER_CR3_MISMATCH:
1030 case VERR_VMM_RING3_CALL_DISABLED:
1031 case VERR_INTERNAL_ERROR:
1032 case VERR_INTERNAL_ERROR_2:
1033 case VERR_INTERNAL_ERROR_3:
1034 case VERR_INTERNAL_ERROR_4:
1035 case VERR_INTERNAL_ERROR_5:
1036 case VERR_IPE_UNEXPECTED_STATUS:
1037 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1038 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1039 return rc;
1040
1041 /*
1042 * The rest is unexpected, and will keep us here.
1043 */
1044 default:
1045 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1046 break;
1047 }
1048 } /* debug for ever */
1049}
1050
1051
1052#if defined(VBOX_WITH_REM) || defined(DEBUG)
1053/**
1054 * Steps recompiled code.
1055 *
1056 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1057 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1058 *
1059 * @param pVM The cross context VM structure.
1060 * @param pVCpu The cross context virtual CPU structure.
1061 */
1062static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1063{
1064 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1065
1066# ifdef VBOX_WITH_REM
1067 EMRemLock(pVM);
1068
1069 /*
1070 * Switch to REM, step instruction, switch back.
1071 */
1072 int rc = REMR3State(pVM, pVCpu);
1073 if (RT_SUCCESS(rc))
1074 {
1075 rc = REMR3Step(pVM, pVCpu);
1076 REMR3StateBack(pVM, pVCpu);
1077 }
1078 EMRemUnlock(pVM);
1079
1080# else
1081 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1082# endif
1083
1084 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1085 return rc;
1086}
1087#endif /* VBOX_WITH_REM || DEBUG */
1088
1089
1090#ifdef VBOX_WITH_REM
1091/**
1092 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
1093 * critical section.
1094 *
1095 * @returns false - new fInREMState value.
1096 * @param pVM The cross context VM structure.
1097 * @param pVCpu The cross context virtual CPU structure.
1098 */
1099DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1100{
1101 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1102 REMR3StateBack(pVM, pVCpu);
1103 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1104
1105 EMRemUnlock(pVM);
1106 return false;
1107}
1108#endif
1109
1110
1111/**
1112 * Executes recompiled code.
1113 *
1114 * This function contains the recompiler version of the inner
1115 * execution loop (the outer loop being in EMR3ExecuteVM()).
1116 *
1117 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1118 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1119 *
1120 * @param pVM The cross context VM structure.
1121 * @param pVCpu The cross context virtual CPU structure.
1122 * @param pfFFDone Where to store an indicator telling whether or not
1123 * FFs were done before returning.
1124 *
1125 */
1126static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1127{
1128#ifdef LOG_ENABLED
1129 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1130
1131 if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1132 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1133 else
1134 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1135#endif
1136 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1137
1138#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1139 AssertMsg( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1140 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1141 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1142#endif
1143
1144 /*
1145 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1146 * or the REM suggests raw-mode execution.
1147 */
1148 *pfFFDone = false;
1149#ifdef VBOX_WITH_REM
1150 bool fInREMState = false;
1151#else
1152 uint32_t cLoops = 0;
1153#endif
1154 int rc = VINF_SUCCESS;
1155 for (;;)
1156 {
1157#ifdef VBOX_WITH_REM
1158 /*
1159 * Lock REM and update the state if not already in sync.
1160 *
1161 * Note! Big lock, but you are not supposed to own any lock when
1162 * coming in here.
1163 */
1164 if (!fInREMState)
1165 {
1166 EMRemLock(pVM);
1167 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1168
1169 /* Flush the recompiler translation blocks if the VCPU has changed,
1170 also force a full CPU state resync. */
1171 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1172 {
1173 REMFlushTBs(pVM);
1174 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1175 }
1176 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1177
1178 rc = REMR3State(pVM, pVCpu);
1179
1180 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1181 if (RT_FAILURE(rc))
1182 break;
1183 fInREMState = true;
1184
1185 /*
1186 * We might have missed the raising of VMREQ, TIMER and some other
1187 * important FFs while we were busy switching the state. So, check again.
1188 */
1189 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1190 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1191 {
1192 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1193 goto l_REMDoForcedActions;
1194 }
1195 }
1196#endif
1197
1198 /*
1199 * Execute REM.
1200 */
1201 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1202 {
1203 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1204#ifdef VBOX_WITH_REM
1205 rc = REMR3Run(pVM, pVCpu);
1206#else
1207 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
1208#endif
1209 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1210 }
1211 else
1212 {
1213 /* Give up this time slice; virtual time continues */
1214 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1215 RTThreadSleep(5);
1216 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1217 rc = VINF_SUCCESS;
1218 }
1219
1220 /*
1221 * Deal with high priority post execution FFs before doing anything
1222 * else. Sync back the state and leave the lock to be on the safe side.
1223 */
1224 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1225 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1226 {
1227#ifdef VBOX_WITH_REM
1228 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1229#endif
1230 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1231 }
1232
1233 /*
1234 * Process the returned status code.
1235 */
1236 if (rc != VINF_SUCCESS)
1237 {
1238 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1239 break;
1240 if (rc != VINF_REM_INTERRUPED_FF)
1241 {
1242#ifndef VBOX_WITH_REM
1243 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1244 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1245 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1246 {
1247 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1248 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1249 {
1250 rc = VINF_EM_RESCHEDULE;
1251 break;
1252 }
1253 }
1254#endif
1255
1256 /*
1257 * Anything which is not known to us means an internal error
1258 * and the termination of the VM!
1259 */
1260 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1261 break;
1262 }
1263 }
1264
1265
1266 /*
1267 * Check and execute forced actions.
1268 *
1269 * Sync back the VM state and leave the lock before calling any of
1270 * these, you never know what's going to happen here.
1271 */
1272#ifdef VBOX_HIGH_RES_TIMERS_HACK
1273 TMTimerPollVoid(pVM, pVCpu);
1274#endif
1275 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1276 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1277 || VMCPU_FF_IS_ANY_SET(pVCpu,
1278 VMCPU_FF_ALL_REM_MASK
1279 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1280 {
1281#ifdef VBOX_WITH_REM
1282l_REMDoForcedActions:
1283 if (fInREMState)
1284 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1285#endif
1286 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1287 rc = emR3ForcedActions(pVM, pVCpu, rc);
1288 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1289 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1290 if ( rc != VINF_SUCCESS
1291 && rc != VINF_EM_RESCHEDULE_REM)
1292 {
1293 *pfFFDone = true;
1294 break;
1295 }
1296 }
1297
1298#ifndef VBOX_WITH_REM
1299 /*
1300 * Have to check if we can get back to fast execution mode every so often.
1301 */
1302 if (!(++cLoops & 7))
1303 {
1304 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
1305 if ( enmCheck != EMSTATE_REM
1306 && enmCheck != EMSTATE_IEM_THEN_REM)
1307 return VINF_EM_RESCHEDULE;
1308 }
1309#endif
1310
1311 } /* The Inner Loop, recompiled execution mode version. */
1312
1313
1314#ifdef VBOX_WITH_REM
1315 /*
1316 * Returning. Sync back the VM state if required.
1317 */
1318 if (fInREMState)
1319 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1320#endif
1321
1322 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1323 return rc;
1324}
1325
1326
1327#ifdef DEBUG
1328
1329int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1330{
1331 EMSTATE enmOldState = pVCpu->em.s.enmState;
1332
1333 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1334
1335 Log(("Single step BEGIN:\n"));
1336 for (uint32_t i = 0; i < cIterations; i++)
1337 {
1338 DBGFR3PrgStep(pVCpu);
1339 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1340 emR3RemStep(pVM, pVCpu);
1341 if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
1342 break;
1343 }
1344 Log(("Single step END:\n"));
1345 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1346 pVCpu->em.s.enmState = enmOldState;
1347 return VINF_EM_RESCHEDULE;
1348}
1349
1350#endif /* DEBUG */
1351
1352
1353/**
1354 * Try to execute the problematic code in IEM first, then fall back on REM if there
1355 * is too much of it or if IEM doesn't implement something.
1356 *
1357 * @returns Strict VBox status code from IEMExecLots.
1358 * @param pVM The cross context VM structure.
1359 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1360 * @param pfFFDone Force flags done indicator.
1361 *
1362 * @thread EMT(pVCpu)
1363 */
1364static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1365{
1366 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1367 *pfFFDone = false;
1368
1369 /*
1370 * Execute in IEM for a while.
1371 */
1372 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1373 {
1374 uint32_t cInstructions;
1375 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
1376 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1377 if (rcStrict != VINF_SUCCESS)
1378 {
1379 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1380 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1381 break;
1382
1383 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1384 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1385 return rcStrict;
1386 }
1387
1388 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1389 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1390 {
1391 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1392 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1393 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1394 pVCpu->em.s.enmState = enmNewState;
1395 return VINF_SUCCESS;
1396 }
1397
1398 /*
1399 * Check for pending actions.
1400 */
1401 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
1402 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1403 return VINF_SUCCESS;
1404 }
1405
1406 /*
1407 * Switch to REM.
1408 */
1409 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1410 pVCpu->em.s.enmState = EMSTATE_REM;
1411 return VINF_SUCCESS;
1412}
1413
1414
1415/**
1416 * Decides whether to execute RAW, HWACC or REM.
1417 *
1418 * @returns new EM state
1419 * @param pVM The cross context VM structure.
1420 * @param pVCpu The cross context virtual CPU structure.
1421 */
1422EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1423{
1424 /*
1425 * When forcing raw-mode execution, things are simple.
1426 */
1427 if (pVCpu->em.s.fForceRAW)
1428 return EMSTATE_RAW;
1429
1430 /*
1431 * We stay in the wait for SIPI state unless explicitly told otherwise.
1432 */
1433 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1434 return EMSTATE_WAIT_SIPI;
1435
1436 /*
1437 * Execute everything in IEM?
1438 */
1439 if (pVM->em.s.fIemExecutesAll)
1440 return EMSTATE_IEM;
1441
1442 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1443 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1444 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1445
1446 X86EFLAGS EFlags = pVCpu->cpum.GstCtx.eflags;
1447 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1448 {
1449 if (EMIsHwVirtExecutionEnabled(pVM))
1450 {
1451 if (VM_IS_HM_ENABLED(pVM))
1452 {
1453 if (HMCanExecuteGuest(pVCpu, &pVCpu->cpum.GstCtx))
1454 return EMSTATE_HM;
1455 }
1456 else if (NEMR3CanExecuteGuest(pVM, pVCpu))
1457 return EMSTATE_NEM;
1458
1459 /*
1460 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1461 * turns off monitoring features essential for raw mode!
1462 */
1463 return EMSTATE_IEM_THEN_REM;
1464 }
1465 }
1466
1467 /*
1468 * Standard raw-mode:
1469 *
1470 * Here we only support 16 & 32-bit protected mode ring-3 code that has no I/O privileges,
1471 * or 32-bit protected mode ring-0 code.
1472 *
1473 * The tests are ordered by the likelihood of being true during normal execution.
1474 */
1475 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1476 {
1477 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1478 return EMSTATE_REM;
1479 }
1480
1481# ifndef VBOX_RAW_V86
1482 if (EFlags.u32 & X86_EFL_VM) {
1483 Log2(("raw mode refused: VM_MASK\n"));
1484 return EMSTATE_REM;
1485 }
1486# endif
1487
1488 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1489 uint32_t u32CR0 = pVCpu->cpum.GstCtx.cr0;
1490 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1491 {
1492 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1493 return EMSTATE_REM;
1494 }
1495
1496 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
1497 {
1498 uint32_t u32Dummy, u32Features;
1499
1500 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1501 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1502 return EMSTATE_REM;
1503 }
1504
1505 unsigned uSS = pVCpu->cpum.GstCtx.ss.Sel;
1506 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
1507 || (uSS & X86_SEL_RPL) == 3)
1508 {
1509 if (!EMIsRawRing3Enabled(pVM))
1510 return EMSTATE_REM;
1511
1512 if (!(EFlags.u32 & X86_EFL_IF))
1513 {
1514 Log2(("raw mode refused: IF (RawR3)\n"));
1515 return EMSTATE_REM;
1516 }
1517
1518 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1519 {
1520 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1521 return EMSTATE_REM;
1522 }
1523 }
1524 else
1525 {
1526 if (!EMIsRawRing0Enabled(pVM))
1527 return EMSTATE_REM;
1528
1529 if (EMIsRawRing1Enabled(pVM))
1530 {
1531 /* Only ring 0 and 1 supervisor code. */
1532 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1533 {
1534 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1535 return EMSTATE_REM;
1536 }
1537 }
1538 /* Only ring 0 supervisor code. */
1539 else if ((uSS & X86_SEL_RPL) != 0)
1540 {
1541 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1542 return EMSTATE_REM;
1543 }
1544
1545 // Let's start with pure 32 bits ring 0 code first
1546 /** @todo What's pure 32-bit mode? flat? */
1547 if ( !(pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1548 || !(pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
1549 {
1550 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1551 return EMSTATE_REM;
1552 }
1553
1554 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1555 if (!(u32CR0 & X86_CR0_WP))
1556 {
1557 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1558 return EMSTATE_REM;
1559 }
1560
1561# ifdef VBOX_WITH_RAW_MODE
1562 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pVCpu->cpum.GstCtx.eip))
1563 {
1564 Log2(("raw r0 mode forced: patch code\n"));
1565# ifdef VBOX_WITH_SAFE_STR
1566 Assert(pVCpu->cpum.GstCtx.tr.Sel);
1567# endif
1568 return EMSTATE_RAW;
1569 }
1570# endif /* VBOX_WITH_RAW_MODE */
1571
1572# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1573 if (!(EFlags.u32 & X86_EFL_IF))
1574 {
1575 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1576 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1577 return EMSTATE_REM;
1578 }
1579# endif
1580
1581# ifndef VBOX_WITH_RAW_RING1
1582 /** @todo still necessary??? */
1583 if (EFlags.Bits.u2IOPL != 0)
1584 {
1585 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1586 return EMSTATE_REM;
1587 }
1588# endif
1589 }
1590
1591 /*
1592     * Stale hidden selectors mean raw-mode is unsafe (being very careful).
1593 */
1594 if (pVCpu->cpum.GstCtx.cs.fFlags & CPUMSELREG_FLAGS_STALE)
1595 {
1596 Log2(("raw mode refused: stale CS\n"));
1597 return EMSTATE_REM;
1598 }
1599 if (pVCpu->cpum.GstCtx.ss.fFlags & CPUMSELREG_FLAGS_STALE)
1600 {
1601 Log2(("raw mode refused: stale SS\n"));
1602 return EMSTATE_REM;
1603 }
1604 if (pVCpu->cpum.GstCtx.ds.fFlags & CPUMSELREG_FLAGS_STALE)
1605 {
1606 Log2(("raw mode refused: stale DS\n"));
1607 return EMSTATE_REM;
1608 }
1609 if (pVCpu->cpum.GstCtx.es.fFlags & CPUMSELREG_FLAGS_STALE)
1610 {
1611 Log2(("raw mode refused: stale ES\n"));
1612 return EMSTATE_REM;
1613 }
1614 if (pVCpu->cpum.GstCtx.fs.fFlags & CPUMSELREG_FLAGS_STALE)
1615 {
1616 Log2(("raw mode refused: stale FS\n"));
1617 return EMSTATE_REM;
1618 }
1619 if (pVCpu->cpum.GstCtx.gs.fFlags & CPUMSELREG_FLAGS_STALE)
1620 {
1621 Log2(("raw mode refused: stale GS\n"));
1622 return EMSTATE_REM;
1623 }
1624
1625# ifdef VBOX_WITH_SAFE_STR
1626 if (pVCpu->cpum.GstCtx.tr.Sel == 0)
1627 {
1628 Log(("Raw mode refused -> TR=0\n"));
1629 return EMSTATE_REM;
1630 }
1631# endif
1632
1633 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1634 return EMSTATE_RAW;
1635}
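
The function above is essentially an ordered chain of eliminations. The following is a minimal sketch of that decision order, assuming simplified inputs: the enum, the emSketchReschedule name and the boolean parameters are hypothetical stand-ins for the real VM/VMCPU queries, and the whole raw-mode suitability analysis is collapsed into a single flag. It is not part of EM.cpp or the VirtualBox API.

/* Illustrative sketch of emR3Reschedule()'s decision order (hypothetical names only). */
#include <stdbool.h>

typedef enum EMSKETCHSTATE
{
    EMSKETCHSTATE_RAW,
    EMSKETCHSTATE_WAIT_SIPI,
    EMSKETCHSTATE_IEM,
    EMSKETCHSTATE_HM,
    EMSKETCHSTATE_NEM,
    EMSKETCHSTATE_IEM_THEN_REM,
    EMSKETCHSTATE_REM
} EMSKETCHSTATE;

static EMSKETCHSTATE emSketchReschedule(bool fForceRaw, bool fWaitSipi, bool fIemExecutesAll,
                                        bool fHwVirtEnabled, bool fHmEnabled, bool fHmCanExec,
                                        bool fNemCanExec, bool fRawModeSuitable)
{
    if (fForceRaw)              /* 1. Forced raw-mode execution short-circuits everything.      */
        return EMSKETCHSTATE_RAW;
    if (fWaitSipi)              /* 2. Application processors stay halted until they get a SIPI. */
        return EMSKETCHSTATE_WAIT_SIPI;
    if (fIemExecutesAll)        /* 3. Pure interpretation when configured that way.             */
        return EMSKETCHSTATE_IEM;
    if (fHwVirtEnabled)         /* 4. Hardware-assisted execution: HM first, otherwise NEM,     */
    {                           /*    otherwise interpret and fall back to the recompiler.      */
        if (fHmEnabled)
        {
            if (fHmCanExec)
                return EMSKETCHSTATE_HM;
        }
        else if (fNemCanExec)
            return EMSKETCHSTATE_NEM;
        return EMSKETCHSTATE_IEM_THEN_REM;
    }
    /* 5. The long raw-mode suitability checks above collapse to a single flag in this sketch. */
    return fRawModeSuitable ? EMSKETCHSTATE_RAW : EMSKETCHSTATE_REM;
}

The ordering matters: forced raw-mode and the wait-for-SIPI state override everything, and hardware-assisted execution is only attempted when the respective backend reports the current guest context as executable.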
1636
1637
1638/**
1639 * Executes all high priority post execution force actions.
1640 *
1641 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1642 * fatal error status code.
1643 *
1644 * @param pVM The cross context VM structure.
1645 * @param pVCpu The cross context virtual CPU structure.
1646 * @param rc The current strict VBox status code rc.
1647 */
1648VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1649{
1650 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1651
1652 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PDM_CRITSECT))
1653 PDMCritSectBothFF(pVCpu);
1654
1655 /* Update CR3 (Nested Paging case for HM). */
1656 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1657 {
1658 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1659 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1660 if (RT_FAILURE(rc2))
1661 return rc2;
1662 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1663 }
1664
1665 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1666 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1667 {
1668 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1669 if (CPUMIsGuestInPAEMode(pVCpu))
1670 {
1671 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1672 AssertPtr(pPdpes);
1673
1674 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1675 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1676 }
1677 else
1678 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1679 }
1680
1681 /* IEM has pending work (typically memory write after INS instruction). */
1682 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IEM))
1683 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1684
1685    /* IOM has pending work (committing an I/O or MMIO write). */
1686 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_IOM))
1687 {
1688 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1689 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1690 { /* half likely, or at least it's a line shorter. */ }
1691 else if (rc == VINF_SUCCESS)
1692 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1693 else
1694 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1695 }
1696
1697#ifdef VBOX_WITH_RAW_MODE
1698 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1699 CSAMR3DoPendingAction(pVM, pVCpu);
1700#endif
1701
1702 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
1703 {
1704 if ( rc > VINF_EM_NO_MEMORY
1705 && rc <= VINF_EM_LAST)
1706 rc = VINF_EM_NO_MEMORY;
1707 }
1708
1709 return rc;
1710}
1711
1712
1713/**
1714 * Helper for emR3ForcedActions() for injecting interrupts into the
1715 * guest.
1716 *
1717 * @returns VBox status code.
1718 * @param pVCpu The cross context virtual CPU structure.
1719 * @param pfWakeupPending Where to store whether a wake up from HLT state is
1720 * pending.
1721 * @param pfInjected Where to store whether an interrupt was injected.
1722 */
1723DECLINLINE(int) emR3GstInjectIntr(PVMCPU pVCpu, bool *pfWakeupPending, bool *pfInjected)
1724{
1725 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
1726 *pfWakeupPending = false;
1727 *pfInjected = false;
1728
1729 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1730#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1731 && pVCpu->cpum.GstCtx.hwvirt.fGif
1732#endif
1733#ifdef VBOX_WITH_RAW_MODE
1734 && !PATMIsPatchGCAddr(pVCpu->CTX_SUFF(pVM), pVCpu->cpum.GstCtx.eip)
1735#endif
1736 && pVCpu->cpum.GstCtx.eflags.Bits.u1IF)
1737 {
1738 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1739 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1740 /** @todo this really isn't nice, should properly handle this */
1741 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1742 int rc2 = TRPMR3InjectEvent(pVCpu->CTX_SUFF(pVM), pVCpu, TRPM_HARDWARE_INT);
1743 Assert(rc2 != VINF_VMX_VMEXIT && rc2 != VINF_SVM_VMEXIT);
1744 Log(("EM: TRPMR3InjectEvent -> %d\n", rc2));
1745 *pfWakeupPending = true;
1746 *pfInjected = true;
1747 return rc2;
1748 }
1749
1750 return VINF_NO_CHANGE;
1751}
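
Put differently, delivery to the ordinary guest is gated on four conditions: a PIC or APIC interrupt must actually be pending, the global interrupt flag must be set, the CPU must not be executing PATM patch code, and the guest's EFLAGS.IF must permit delivery; otherwise VINF_NO_CHANGE tells the caller nothing was done. Below is a tiny hedged sketch of just that gate, with hypothetical names that are not VirtualBox API.

/* Illustrative predicate mirroring the gate checked by emR3GstInjectIntr() above. */
#include <stdbool.h>

static bool emSketchCanInjectGuestIntr(bool fApicOrPicPending, /* VMCPU_FF_INTERRUPT_APIC / _PIC */
                                       bool fGif,              /* global interrupt flag (GIF)    */
                                       bool fInPatchCode,      /* executing PATM patch code      */
                                       bool fEFlagsIF)         /* guest EFLAGS.IF                */
{
    return fApicOrPicPending && fGif && !fInPatchCode && fEFlagsIF;
}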
1752
1753
1754#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
1755/**
1756 * Helper for emR3ForcedActions() for injecting interrupts into the
1757 * VMX nested-guest.
1758 *
1759 * @returns VBox status code.
1760 * @param pVCpu The cross context virtual CPU structure.
1761 * @param pfWakeupPending Where to store whether a wake up from HLT state is
1762 * pending.
1763 * @param pfInjected        Where to store whether an interrupt was injected.
1764 */
1765static int emR3VmxNstGstInjectIntr(PVMCPU pVCpu, bool *pfWakeupPending, bool *pfInjected)
1766{
1767 Assert(CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx));
1768 *pfWakeupPending = false;
1769 *pfInjected = false;
1770
1771 /** @todo NSTVMX: Interrupt-window VM-exits currently only trigger when an
1772 * interrupt is pending but in reality it should happen as soon as the
1773 * guest is ready to receive interrupts even if no interrupt is pending.
1774 * Doing it before checking the VMCPU_FF_INTERRUPT_APIC or
1775 * VMCPU_FF_INTERRUPT_PIC here doesn't help as the caller already checks for
1776 * it earlier as part of the high-priority pre-mask anyway. */
1777 if ( VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
1778 && CPUMCanVmxNstGstTakePhysIntr(pVCpu, &pVCpu->cpum.GstCtx))
1779 {
1780 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1781 Assert(!VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
1782 if (CPUMIsGuestVmxProcCtlsSet(pVCpu, &pVCpu->cpum.GstCtx, VMX_PROC_CTLS_INT_WINDOW_EXIT))
1783 {
1784 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_VMX_VMEXIT_MASK);
1785 VBOXSTRICTRC rcStrict = IEMExecVmxVmexitIntWindow(pVCpu);
1786 if (RT_SUCCESS(rcStrict))
1787 {
1788 *pfWakeupPending = true;
1789 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1790 if (rcStrict == VINF_VMX_VMEXIT)
1791 return VINF_SUCCESS;
1792 return VBOXSTRICTRC_VAL(rcStrict);
1793 }
1794            AssertMsgFailed(("Interrupt-window VM-exit failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1795 return VINF_EM_TRIPLE_FAULT;
1796 }
1797
1798 int rc = emR3GstInjectIntr(pVCpu, pfWakeupPending, pfInjected);
1799 if (rc == VINF_VMX_VMEXIT)
1800 rc = VINF_SUCCESS;
1801 return rc;
1802 }
1803
1804 /** @todo NSTVMX: Virtual interrupt injection, virtual-interrupt delivery. */
1805
1806 return VINF_NO_CHANGE;
1807}
1808#endif /* VBOX_WITH_NESTED_HWVIRT_VMX */
1809
1810#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1811/**
1812 * Helper for emR3ForcedActions() for injecting interrupts into the
1813 * SVM nested-guest.
1814 *
1815 * @returns VBox status code.
1816 * @param pVCpu The cross context virtual CPU structure.
1817 * @param pfWakeupPending Where to store whether a wake up from HLT state is
1818 * pending.
1819 * @param pfInjected Where to store whether an interrupt was injected.
1820 */
1821static int emR3SvmNstGstInjectIntr(PVMCPU pVCpu, bool *pfWakeupPending, bool *pfInjected)
1822{
1823 Assert(CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx));
1824 *pfWakeupPending = false;
1825 *pfInjected = false;
1826
1827 PVM pVM = pVCpu->CTX_SUFF(pVM);
1828 Assert(pVCpu->cpum.GstCtx.hwvirt.fGif);
1829 bool fVirtualGif = CPUMGetSvmNstGstVGif(&pVCpu->cpum.GstCtx);
1830#ifdef VBOX_WITH_RAW_MODE
1831 fVirtualGif &= !PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip);
1832#endif
1833 if (fVirtualGif)
1834 {
1835 if (CPUMCanSvmNstGstTakePhysIntr(pVCpu, &pVCpu->cpum.GstCtx))
1836 {
1837 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1838 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1839 {
1840 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1841 {
1842 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1843 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1844 if (RT_SUCCESS(rcStrict))
1845 {
1846 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1847 * doesn't intercept HLT but intercepts INTR? */
1848 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1849 if (rcStrict == VINF_SVM_VMEXIT)
1850 rcStrict = VINF_SUCCESS;
1851 *pfWakeupPending = true;
1852 return VBOXSTRICTRC_VAL(rcStrict);
1853 }
1854
1855 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1856 return VINF_EM_TRIPLE_FAULT;
1857 }
1858
1859 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1860 /** @todo this really isn't nice, should properly handle this */
1861 int rc = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1862 Assert(rc != VINF_PGM_CHANGE_MODE);
1863 if (rc == VINF_SVM_VMEXIT)
1864 rc = VINF_SUCCESS;
1865 *pfWakeupPending = true;
1866 *pfInjected = true;
1867 return rc;
1868 }
1869 }
1870
1871 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1872 && CPUMCanSvmNstGstTakeVirtIntr(pVCpu, &pVCpu->cpum.GstCtx))
1873 {
1874 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1875 {
1876 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1877 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1878 if (RT_SUCCESS(rcStrict))
1879 {
1880 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1881 * doesn't intercept HLT but intercepts VINTR? */
1882 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1883 if (rcStrict == VINF_SVM_VMEXIT)
1884 rcStrict = VINF_SUCCESS;
1885 *pfWakeupPending = true;
1886 return VBOXSTRICTRC_VAL(rcStrict);
1887 }
1888
1889 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1890 return VINF_EM_TRIPLE_FAULT;
1891 }
1892
1893 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1894 uint8_t const uNstGstVector = CPUMGetSvmNstGstInterrupt(&pVCpu->cpum.GstCtx);
1895 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR vector %#x\n", uNstGstVector));
1896 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1897 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1898
1899 *pfWakeupPending = true;
1900 *pfInjected = true;
1901 return VINF_EM_RESCHEDULE;
1902 }
1903 }
1904
1905 return VINF_SUCCESS;
1906}
1907#endif /* VBOX_WITH_NESTED_HWVIRT_SVM */
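
For orientation, the SVM nested-guest helper above walks a two-tier ladder: nothing is deliverable while the virtual GIF is clear; pending physical (PIC/APIC) interrupts are considered first and either cause an INTR #VMEXIT or are injected straight into the nested guest; only then are virtual interrupts queued via VMCPU_FF_INTERRUPT_NESTED_GUEST considered, causing a VINTR #VMEXIT when intercepted or otherwise asserting the virtual vector through TRPM. A hedged sketch of that ladder follows; the enum, function and parameter names are hypothetical stand-ins, not VirtualBox API.

/* Illustrative sketch of the decision ladder walked by emR3SvmNstGstInjectIntr() (hypothetical names). */
#include <stdbool.h>

typedef enum EMSKETCHSVMACTION
{
    EMSKETCHSVMACTION_NOTHING,          /* virtual GIF clear or nothing deliverable      */
    EMSKETCHSVMACTION_INTR_VMEXIT,      /* physical interrupt intercepted -> #VMEXIT     */
    EMSKETCHSVMACTION_INJECT_PHYS,      /* physical interrupt delivered to nested guest  */
    EMSKETCHSVMACTION_VINTR_VMEXIT,     /* virtual interrupt intercepted -> #VMEXIT      */
    EMSKETCHSVMACTION_INJECT_VIRT       /* virtual interrupt vector asserted via TRPM    */
} EMSKETCHSVMACTION;

static EMSKETCHSVMACTION emSketchSvmNstGstIntr(bool fVGif,
                                               bool fPhysIntrPending, bool fCanTakePhys, bool fInterceptIntr,
                                               bool fVirtIntrPending, bool fCanTakeVirt, bool fInterceptVintr)
{
    if (!fVGif)
        return EMSKETCHSVMACTION_NOTHING;

    /* Physical (PIC/APIC) interrupts are considered first. */
    if (fCanTakePhys && fPhysIntrPending)
        return fInterceptIntr ? EMSKETCHSVMACTION_INTR_VMEXIT : EMSKETCHSVMACTION_INJECT_PHYS;

    /* Then virtual interrupts queued by the outer guest (VMCPU_FF_INTERRUPT_NESTED_GUEST). */
    if (fVirtIntrPending && fCanTakeVirt)
        return fInterceptVintr ? EMSKETCHSVMACTION_VINTR_VMEXIT : EMSKETCHSVMACTION_INJECT_VIRT;

    return EMSKETCHSVMACTION_NOTHING;
}

The sketch deliberately ignores the wake-up signalling and CPUM state-import details handled by the real code.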
1908
1909/**
1910 * Executes all pending forced actions.
1911 *
1912 * Forced actions can cause execution delays and execution
1913 * rescheduling. The first we deal with using action priority, so
1914 * that for instance pending timers aren't scheduled and run until
1915 * right before execution. The rescheduling we deal with using
1916 * return codes. The same goes for VM termination, only in that case
1917 * we exit everything.
1918 *
1919 * @returns VBox status code of equal or greater importance/severity than rc.
1920 * The most important ones are: VINF_EM_RESCHEDULE,
1921 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1922 *
1923 * @param pVM The cross context VM structure.
1924 * @param pVCpu The cross context virtual CPU structure.
1925 * @param rc The current rc.
1926 *
1927 */
1928int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1929{
1930 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1931#ifdef VBOX_STRICT
1932 int rcIrq = VINF_SUCCESS;
1933#endif
1934 int rc2;
1935#define UPDATE_RC() \
1936 do { \
1937 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1938 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1939 break; \
1940 if (!rc || rc2 < rc) \
1941 rc = rc2; \
1942 } while (0)
1943 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1944
1945 /*
1946 * Post execution chunk first.
1947 */
1948 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1949 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1950 {
1951 /*
1952 * EMT Rendezvous (must be serviced before termination).
1953 */
1954 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
1955 {
1956 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1957 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1958 UPDATE_RC();
1959 /** @todo HACK ALERT! The following test is to make sure EM+TM
1960 * thinks the VM is stopped/reset before the next VM state change
1961 * is made. We need a better solution for this, or at least make it
1962 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1963 * VINF_EM_SUSPEND). */
1964 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1965 {
1966 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1967 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1968 return rc;
1969 }
1970 }
1971
1972 /*
1973 * State change request (cleared by vmR3SetStateLocked).
1974 */
1975 if (VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
1976 {
1977 VMSTATE enmState = VMR3GetState(pVM);
1978 switch (enmState)
1979 {
1980 case VMSTATE_FATAL_ERROR:
1981 case VMSTATE_FATAL_ERROR_LS:
1982 case VMSTATE_GURU_MEDITATION:
1983 case VMSTATE_GURU_MEDITATION_LS:
1984 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1985 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1986 return VINF_EM_SUSPEND;
1987
1988 case VMSTATE_DESTROYING:
1989 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1990 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1991 return VINF_EM_TERMINATE;
1992
1993 default:
1994 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1995 }
1996 }
1997
1998 /*
1999 * Debugger Facility polling.
2000 */
2001 if ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
2002 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
2003 {
2004 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2005 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2006 UPDATE_RC();
2007 }
2008
2009 /*
2010 * Postponed reset request.
2011 */
2012 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
2013 {
2014 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2015 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
2016 UPDATE_RC();
2017 }
2018
2019#ifdef VBOX_WITH_RAW_MODE
2020 /*
2021 * CSAM page scanning.
2022 */
2023 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
2024 && VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
2025 {
2026 /** @todo check for 16 or 32 bits code! (D bit in the code selector) */
2027 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
2028 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2029 CSAMR3CheckCodeEx(pVM, &pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.eip);
2030 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
2031 }
2032#endif
2033
2034 /*
2035 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
2036 */
2037 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2038 {
2039 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2040 UPDATE_RC();
2041 if (rc == VINF_EM_NO_MEMORY)
2042 return rc;
2043 }
2044
2045 /* check that we got them all */
2046 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2047 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
2048 }
2049
2050 /*
2051 * Normal priority then.
2052 * (Executed in no particular order.)
2053 */
2054 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
2055 {
2056 /*
2057 * PDM Queues are pending.
2058 */
2059 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
2060 PDMR3QueueFlushAll(pVM);
2061
2062 /*
2063 * PDM DMA transfers are pending.
2064 */
2065 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
2066 PDMR3DmaRun(pVM);
2067
2068 /*
2069 * EMT Rendezvous (make sure they are handled before the requests).
2070 */
2071 if (VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2072 {
2073 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2074 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2075 UPDATE_RC();
2076 /** @todo HACK ALERT! The following test is to make sure EM+TM
2077 * thinks the VM is stopped/reset before the next VM state change
2078 * is made. We need a better solution for this, or at least make it
2079 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2080 * VINF_EM_SUSPEND). */
2081 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2082 {
2083 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2084 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2085 return rc;
2086 }
2087 }
2088
2089 /*
2090 * Requests from other threads.
2091 */
2092 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
2093 {
2094 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2095 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
2096 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
2097 {
2098 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2099 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2100 return rc2;
2101 }
2102 UPDATE_RC();
2103 /** @todo HACK ALERT! The following test is to make sure EM+TM
2104 * thinks the VM is stopped/reset before the next VM state change
2105 * is made. We need a better solution for this, or at least make it
2106 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2107 * VINF_EM_SUSPEND). */
2108 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2109 {
2110 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2111 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2112 return rc;
2113 }
2114 }
2115
2116#ifdef VBOX_WITH_REM
2117 /* Replay the handler notification changes. */
2118 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
2119 {
2120 /* Try not to cause deadlocks. */
2121 if ( pVM->cCpus == 1
2122 || ( !PGMIsLockOwner(pVM)
2123 && !IOMIsLockWriteOwner(pVM))
2124 )
2125 {
2126 EMRemLock(pVM);
2127 REMR3ReplayHandlerNotifications(pVM);
2128 EMRemUnlock(pVM);
2129 }
2130 }
2131#endif
2132
2133 /* check that we got them all */
2134 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
2135 }
2136
2137 /*
2138 * Normal priority then. (per-VCPU)
2139 * (Executed in no particular order.)
2140 */
2141 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
2142 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
2143 {
2144 /*
2145 * Requests from other threads.
2146 */
2147 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_REQUEST))
2148 {
2149 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2150 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
2151 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
2152 {
2153 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2154 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2155 return rc2;
2156 }
2157 UPDATE_RC();
2158 /** @todo HACK ALERT! The following test is to make sure EM+TM
2159 * thinks the VM is stopped/reset before the next VM state change
2160 * is made. We need a better solution for this, or at least make it
2161 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2162 * VINF_EM_SUSPEND). */
2163 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2164 {
2165 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2166 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2167 return rc;
2168 }
2169 }
2170
2171 /* check that we got them all */
2172 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
2173 }
2174
2175 /*
2176 * High priority pre execution chunk last.
2177 * (Executed in ascending priority order.)
2178 */
2179 if ( VM_FF_IS_ANY_SET(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
2180 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
2181 {
2182 /*
2183 * Timers before interrupts.
2184 */
2185 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TIMER)
2186 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2187 TMR3TimerQueuesDo(pVM);
2188
2189 /*
2190 * Pick up asynchronously posted interrupts into the APIC.
2191 */
2192 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2193 APICUpdatePendingInterrupts(pVCpu);
2194
2195 /*
2196 * The instruction following an emulated STI should *always* be executed!
2197 *
2198         * Note! We intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if
2199 * the eip is the same as the inhibited instr address. Before we
2200 * are able to execute this instruction in raw mode (iret to
2201 * guest code) an external interrupt might force a world switch
2202 * again. Possibly allowing a guest interrupt to be dispatched
2203 * in the process. This could break the guest. Sounds very
2204         * unlikely, but such timing sensitive problems are not as rare as
2205 * you might think.
2206 */
2207 if ( VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2208 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2209 {
2210 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
2211 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
2212 {
2213 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
2214 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2215 }
2216 else
2217 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
2218 }
2219
2220 /*
2221 * Interrupts.
2222 */
2223 bool fWakeupPending = false;
2224 if ( !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY)
2225 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
2226 && !VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2227 && !TRPMHasTrap(pVCpu)) /* an interrupt could already be scheduled for dispatching in the recompiler. */
2228 {
2229 Assert(!HMR3IsEventPending(pVCpu));
2230
2231 bool fInjected;
2232#ifdef VBOX_WITH_NESTED_HWVIRT_VMX
2233 if (CPUMIsGuestInVmxNonRootMode(&pVCpu->cpum.GstCtx))
2234 rc2 = emR3VmxNstGstInjectIntr(pVCpu, &fWakeupPending, &fInjected);
2235 else
2236#endif
2237#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2238 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
2239 rc2 = emR3SvmNstGstInjectIntr(pVCpu, &fWakeupPending, &fInjected);
2240 else
2241#endif
2242 {
2243 rc2 = emR3GstInjectIntr(pVCpu, &fWakeupPending, &fInjected);
2244 }
2245 if (rc2 != VINF_NO_CHANGE)
2246 {
2247 if ( pVM->em.s.fIemExecutesAll
2248 && ( rc2 == VINF_EM_RESCHEDULE_REM
2249 || rc2 == VINF_EM_RESCHEDULE_HM
2250 || rc2 == VINF_EM_RESCHEDULE_RAW))
2251 {
2252 rc2 = VINF_EM_RESCHEDULE;
2253 }
2254#ifdef VBOX_STRICT
2255 if (fInjected)
2256 rcIrq = rc2;
2257#endif
2258 UPDATE_RC();
2259 }
2260 }
2261
2262 /*
2263 * Allocate handy pages.
2264 */
2265 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2266 {
2267 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2268 UPDATE_RC();
2269 }
2270
2271 /*
2272 * Debugger Facility request.
2273 */
2274 if ( ( VM_FF_IS_SET(pVM, VM_FF_DBGF)
2275 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_DBGF) )
2276 && !VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY) )
2277 {
2278 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2279 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2280 UPDATE_RC();
2281 }
2282
2283 /*
2284 * EMT Rendezvous (must be serviced before termination).
2285 */
2286 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2287 && VM_FF_IS_SET(pVM, VM_FF_EMT_RENDEZVOUS))
2288 {
2289 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2290 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2291 UPDATE_RC();
2292 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2293 * stopped/reset before the next VM state change is made. We need a better
2294 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2295             * && rc <= VINF_EM_SUSPEND). */
2296 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2297 {
2298 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2299 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2300 return rc;
2301 }
2302 }
2303
2304 /*
2305 * State change request (cleared by vmR3SetStateLocked).
2306 */
2307 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2308 && VM_FF_IS_SET(pVM, VM_FF_CHECK_VM_STATE))
2309 {
2310 VMSTATE enmState = VMR3GetState(pVM);
2311 switch (enmState)
2312 {
2313 case VMSTATE_FATAL_ERROR:
2314 case VMSTATE_FATAL_ERROR_LS:
2315 case VMSTATE_GURU_MEDITATION:
2316 case VMSTATE_GURU_MEDITATION_LS:
2317 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2318 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2319 return VINF_EM_SUSPEND;
2320
2321 case VMSTATE_DESTROYING:
2322 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2323 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2324 return VINF_EM_TERMINATE;
2325
2326 default:
2327 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2328 }
2329 }
2330
2331 /*
2332 * Out of memory? Since most of our fellow high priority actions may cause us
2333 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2334 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2335 * than us since we can terminate without allocating more memory.
2336 */
2337 if (VM_FF_IS_SET(pVM, VM_FF_PGM_NO_MEMORY))
2338 {
2339 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2340 UPDATE_RC();
2341 if (rc == VINF_EM_NO_MEMORY)
2342 return rc;
2343 }
2344
2345 /*
2346 * If the virtual sync clock is still stopped, make TM restart it.
2347 */
2348 if (VM_FF_IS_SET(pVM, VM_FF_TM_VIRTUAL_SYNC))
2349 TMR3VirtualSyncFF(pVM, pVCpu);
2350
2351#ifdef DEBUG
2352 /*
2353 * Debug, pause the VM.
2354 */
2355 if (VM_FF_IS_SET(pVM, VM_FF_DEBUG_SUSPEND))
2356 {
2357 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2358 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2359 return VINF_EM_SUSPEND;
2360 }
2361#endif
2362
2363 /* check that we got them all */
2364 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2365 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VMCPU_FF_INTERRUPT_NESTED_GUEST | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2366 }
2367
2368#undef UPDATE_RC
2369 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2370 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2371 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2372 return rc;
2373}
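
The UPDATE_RC() macro used throughout this function merges each helper's status into the running rc with a simple rule: VINF_SUCCESS never replaces anything, an rc that already signals a failure is never overwritten, and among strictly positive informational codes the numerically smaller one (the more important request) wins. The sketch below restates the same rule using plain integers as hypothetical stand-ins for the VINF_EM_* constants; it is not VirtualBox API.

/* Illustrative sketch of the UPDATE_RC() merging rule (stand-in constants only). */
static int emSketchMergeRc(int rc, int rc2)
{
    if (rc2 == 0 /* VINF_SUCCESS */ || rc < 0 /* rc already signals a failure */)
        return rc;                  /* nothing to merge / never overwrite an error       */
    if (rc == 0 || rc2 < rc)
        return rc2;                 /* adopt the numerically smaller (more urgent) code  */
    return rc;
}

So if rc already holds a reschedule-style request and a later forced action returns a numerically smaller code, the merge keeps the smaller one and the outer loop reacts to the most urgent request first.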
2374
2375
2376/**
2377 * Check if the preset execution time cap restricts guest execution scheduling.
2378 *
2379 * @returns true if allowed, false otherwise
2380 * @param pVM The cross context VM structure.
2381 * @param pVCpu The cross context virtual CPU structure.
2382 */
2383bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2384{
2385 uint64_t u64UserTime, u64KernelTime;
2386
2387 if ( pVM->uCpuExecutionCap != 100
2388 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2389 {
2390 uint64_t u64TimeNow = RTTimeMilliTS();
2391 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2392 {
2393 /* New time slice. */
2394 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2395 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2396 pVCpu->em.s.u64TimeSliceExec = 0;
2397 }
2398 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2399
2400 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2401 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2402 return false;
2403 }
2404 return true;
2405}
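
As a worked example of the arithmetic: assuming a hypothetical 100 ms EM_TIME_SLICE and uCpuExecutionCap set to 40, the EMT may consume at most (100 * 40) / 100 = 40 ms of combined kernel and user CPU time within each 100 ms wall-clock slice; once u64TimeSliceExec reaches that budget the function returns false until the next slice begins. The standalone sketch below re-implements just this bookkeeping; EM_SKETCH_TIME_SLICE, EMSKETCHSLICE and the function name are hypothetical and not VirtualBox API.

/* Illustrative sketch of the per-slice CPU-time budget check in emR3IsExecutionAllowed(). */
#include <stdbool.h>
#include <stdint.h>

#define EM_SKETCH_TIME_SLICE 100 /* ms of wall-clock time per accounting slice (assumed value) */

typedef struct EMSKETCHSLICE
{
    uint64_t msSliceStart;      /* wall clock at the start of the current slice        */
    uint64_t msSliceStartExec;  /* kernel+user CPU time consumed when the slice began  */
    uint64_t msSliceExec;       /* CPU time consumed so far within the current slice   */
} EMSKETCHSLICE;

static bool emSketchIsExecutionAllowed(EMSKETCHSLICE *pSlice, uint32_t uCapPct,
                                       uint64_t msNow, uint64_t msCpuTime)
{
    if (uCapPct == 100)                                   /* no cap configured */
        return true;

    if (pSlice->msSliceStart + EM_SKETCH_TIME_SLICE < msNow)
    {
        /* A new wall-clock slice begins: reset the consumed-CPU-time baseline. */
        pSlice->msSliceStart     = msNow;
        pSlice->msSliceStartExec = msCpuTime;
        pSlice->msSliceExec      = 0;
    }
    pSlice->msSliceExec = msCpuTime - pSlice->msSliceStartExec;

    /* E.g. with uCapPct = 40: execution is allowed for at most 40 ms out of every 100 ms. */
    return pSlice->msSliceExec < (EM_SKETCH_TIME_SLICE * (uint64_t)uCapPct) / 100;
}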
2406
2407
2408/**
2409 * Execute VM.
2410 *
2411 * This function is the main loop of the VM. The emulation thread
2412 * calls this function when the VM has been successfully constructed
2413 * and we're ready for executing the VM.
2414 *
2415 * Returning from this function means that the VM is turned off or
2416 * suspended (state already saved) and deconstruction is next in line.
2417 *
2418 * All interaction from other threads is done using forced actions
2419 * and signaling of the wait object.
2420 *
2421 * @returns VBox status code, informational status codes may indicate failure.
2422 * @param pVM The cross context VM structure.
2423 * @param pVCpu The cross context virtual CPU structure.
2424 */
2425VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2426{
2427 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2428 pVM,
2429 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2430 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2431 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2432 pVCpu->em.s.fForceRAW));
2433 VM_ASSERT_EMT(pVM);
2434 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2435 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2436 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2437 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2438
2439 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2440 if (rc == 0)
2441 {
2442 /*
2443 * Start the virtual time.
2444 */
2445 TMR3NotifyResume(pVM, pVCpu);
2446
2447 /*
2448 * The Outer Main Loop.
2449 */
2450 bool fFFDone = false;
2451
2452 /* Reschedule right away to start in the right state. */
2453 rc = VINF_SUCCESS;
2454
2455 /* If resuming after a pause or a state load, restore the previous
2456 state or else we'll start executing code. Else, just reschedule. */
2457 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2458 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2459 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2460 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2461 else
2462 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2463 pVCpu->em.s.cIemThenRemInstructions = 0;
2464 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2465
2466 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2467 for (;;)
2468 {
2469 /*
2470 * Before we can schedule anything (we're here because
2471 * scheduling is required) we must service any pending
2472 * forced actions to avoid any pending action causing
2473 * immediate rescheduling upon entering an inner loop
2474 *
2475 * Do forced actions.
2476 */
2477 if ( !fFFDone
2478 && RT_SUCCESS(rc)
2479 && rc != VINF_EM_TERMINATE
2480 && rc != VINF_EM_OFF
2481 && ( VM_FF_IS_ANY_SET(pVM, VM_FF_ALL_REM_MASK)
2482 || VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2483 {
2484 rc = emR3ForcedActions(pVM, pVCpu, rc);
2485 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2486 if ( ( rc == VINF_EM_RESCHEDULE_REM
2487 || rc == VINF_EM_RESCHEDULE_HM)
2488 && pVCpu->em.s.fForceRAW)
2489 rc = VINF_EM_RESCHEDULE_RAW;
2490 }
2491 else if (fFFDone)
2492 fFFDone = false;
2493
2494 /*
2495 * Now what to do?
2496 */
2497 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2498 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2499 switch (rc)
2500 {
2501 /*
2502 * Keep doing what we're currently doing.
2503 */
2504 case VINF_SUCCESS:
2505 break;
2506
2507 /*
2508 * Reschedule - to raw-mode execution.
2509 */
2510/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2511 case VINF_EM_RESCHEDULE_RAW:
2512 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2513 if (VM_IS_RAW_MODE_ENABLED(pVM))
2514 {
2515 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2516 pVCpu->em.s.enmState = EMSTATE_RAW;
2517 }
2518 else
2519 {
2520 AssertLogRelFailed();
2521 pVCpu->em.s.enmState = EMSTATE_NONE;
2522 }
2523 break;
2524
2525 /*
2526 * Reschedule - to HM or NEM.
2527 */
2528 case VINF_EM_RESCHEDULE_HM:
2529 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2530 Assert(!pVCpu->em.s.fForceRAW);
2531 if (VM_IS_HM_ENABLED(pVM))
2532 {
2533 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2534 pVCpu->em.s.enmState = EMSTATE_HM;
2535 }
2536 else if (VM_IS_NEM_ENABLED(pVM))
2537 {
2538 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2539 pVCpu->em.s.enmState = EMSTATE_NEM;
2540 }
2541 else
2542 {
2543 AssertLogRelFailed();
2544 pVCpu->em.s.enmState = EMSTATE_NONE;
2545 }
2546 break;
2547
2548 /*
2549 * Reschedule - to recompiled execution.
2550 */
2551 case VINF_EM_RESCHEDULE_REM:
2552 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2553 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2554 {
2555 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2556 enmOldState, EMSTATE_IEM_THEN_REM));
2557 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2558 {
2559 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2560 pVCpu->em.s.cIemThenRemInstructions = 0;
2561 }
2562 }
2563 else
2564 {
2565 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2566 pVCpu->em.s.enmState = EMSTATE_REM;
2567 }
2568 break;
2569
2570 /*
2571 * Resume.
2572 */
2573 case VINF_EM_RESUME:
2574 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2575 /* Don't reschedule in the halted or wait for SIPI case. */
2576 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2577 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2578 {
2579 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2580 break;
2581 }
2582 /* fall through and get scheduled. */
2583 RT_FALL_THRU();
2584
2585 /*
2586 * Reschedule.
2587 */
2588 case VINF_EM_RESCHEDULE:
2589 {
2590 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2591 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2592 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2593 pVCpu->em.s.cIemThenRemInstructions = 0;
2594 pVCpu->em.s.enmState = enmState;
2595 break;
2596 }
2597
2598 /*
2599 * Halted.
2600 */
2601 case VINF_EM_HALT:
2602 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2603 pVCpu->em.s.enmState = EMSTATE_HALTED;
2604 break;
2605
2606 /*
2607 * Switch to the wait for SIPI state (application processor only)
2608 */
2609 case VINF_EM_WAIT_SIPI:
2610 Assert(pVCpu->idCpu != 0);
2611 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2612 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2613 break;
2614
2615
2616 /*
2617 * Suspend.
2618 */
2619 case VINF_EM_SUSPEND:
2620 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2621 Assert(enmOldState != EMSTATE_SUSPENDED);
2622 pVCpu->em.s.enmPrevState = enmOldState;
2623 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2624 break;
2625
2626 /*
2627 * Reset.
2628 * We might end up doing a double reset for now, we'll have to clean up the mess later.
2629 */
2630 case VINF_EM_RESET:
2631 {
2632 if (pVCpu->idCpu == 0)
2633 {
2634 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2635 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2636 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2637 pVCpu->em.s.cIemThenRemInstructions = 0;
2638 pVCpu->em.s.enmState = enmState;
2639 }
2640 else
2641 {
2642 /* All other VCPUs go into the wait for SIPI state. */
2643 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2644 }
2645 break;
2646 }
2647
2648 /*
2649 * Power Off.
2650 */
2651 case VINF_EM_OFF:
2652 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2653 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2654 TMR3NotifySuspend(pVM, pVCpu);
2655 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2656 return rc;
2657
2658 /*
2659 * Terminate the VM.
2660 */
2661 case VINF_EM_TERMINATE:
2662 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2663 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2664 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2665 TMR3NotifySuspend(pVM, pVCpu);
2666 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2667 return rc;
2668
2669
2670 /*
2671 * Out of memory, suspend the VM and stuff.
2672 */
2673 case VINF_EM_NO_MEMORY:
2674 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2675 Assert(enmOldState != EMSTATE_SUSPENDED);
2676 pVCpu->em.s.enmPrevState = enmOldState;
2677 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2678 TMR3NotifySuspend(pVM, pVCpu);
2679 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2680
2681 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2682 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2683 if (rc != VINF_EM_SUSPEND)
2684 {
2685 if (RT_SUCCESS_NP(rc))
2686 {
2687 AssertLogRelMsgFailed(("%Rrc\n", rc));
2688 rc = VERR_EM_INTERNAL_ERROR;
2689 }
2690 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2691 }
2692 return rc;
2693
2694 /*
2695 * Guest debug events.
2696 */
2697 case VINF_EM_DBG_STEPPED:
2698 case VINF_EM_DBG_STOP:
2699 case VINF_EM_DBG_EVENT:
2700 case VINF_EM_DBG_BREAKPOINT:
2701 case VINF_EM_DBG_STEP:
2702 if (enmOldState == EMSTATE_RAW)
2703 {
2704 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2705 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2706 }
2707 else if (enmOldState == EMSTATE_HM)
2708 {
2709 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2710 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2711 }
2712 else if (enmOldState == EMSTATE_NEM)
2713 {
2714 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2715 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2716 }
2717 else if (enmOldState == EMSTATE_REM)
2718 {
2719 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2720 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2721 }
2722 else
2723 {
2724 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2725 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2726 }
2727 break;
2728
2729 /*
2730 * Hypervisor debug events.
2731 */
2732 case VINF_EM_DBG_HYPER_STEPPED:
2733 case VINF_EM_DBG_HYPER_BREAKPOINT:
2734 case VINF_EM_DBG_HYPER_ASSERTION:
2735 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2736 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2737 break;
2738
2739 /*
2740 * Triple fault.
2741 */
2742 case VINF_EM_TRIPLE_FAULT:
2743 if (!pVM->em.s.fGuruOnTripleFault)
2744 {
2745 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2746 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2747 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2748 continue;
2749 }
2750 /* Else fall through and trigger a guru. */
2751 RT_FALL_THRU();
2752
2753 case VERR_VMM_RING0_ASSERTION:
2754 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2755 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2756 break;
2757
2758 /*
2759 * Any error code showing up here other than the ones we
2760 * know and process above are considered to be FATAL.
2761 *
2762 * Unknown warnings and informational status codes are also
2763 * included in this.
2764 */
2765 default:
2766 if (RT_SUCCESS_NP(rc))
2767 {
2768 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2769 rc = VERR_EM_INTERNAL_ERROR;
2770 }
2771 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2772 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2773 break;
2774 }
2775
2776 /*
2777 * Act on state transition.
2778 */
2779 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2780 if (enmOldState != enmNewState)
2781 {
2782 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2783
2784 /* Clear MWait flags and the unhalt FF. */
2785 if ( enmOldState == EMSTATE_HALTED
2786 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2787 || VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2788 && ( enmNewState == EMSTATE_RAW
2789 || enmNewState == EMSTATE_HM
2790 || enmNewState == EMSTATE_NEM
2791 || enmNewState == EMSTATE_REM
2792 || enmNewState == EMSTATE_IEM_THEN_REM
2793 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2794 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2795 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2796 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2797 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2798 {
2799 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2800 {
2801 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2802 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2803 }
2804 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_UNHALT))
2805 {
2806 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2807 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2808 }
2809 }
2810 }
2811 else
2812 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2813
2814 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2815 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2816
2817 /*
2818 * Act on the new state.
2819 */
2820 switch (enmNewState)
2821 {
2822 /*
2823 * Execute raw.
2824 */
2825 case EMSTATE_RAW:
2826#ifdef VBOX_WITH_RAW_MODE
2827 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2828#else
2829 AssertLogRelMsgFailed(("%Rrc\n", rc));
2830 rc = VERR_EM_INTERNAL_ERROR;
2831#endif
2832 break;
2833
2834 /*
2835 * Execute hardware accelerated raw.
2836 */
2837 case EMSTATE_HM:
2838 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2839 break;
2840
2841 /*
2842                 * Execute using the native hypervisor API (NEM).
2843 */
2844 case EMSTATE_NEM:
2845 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2846 break;
2847
2848 /*
2849 * Execute recompiled.
2850 */
2851 case EMSTATE_REM:
2852 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2853 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2854 break;
2855
2856 /*
2857 * Execute in the interpreter.
2858 */
2859 case EMSTATE_IEM:
2860 {
2861#if 0 /* For testing purposes. */
2862 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2863 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2864 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2865 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2866 rc = VINF_SUCCESS;
2867 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2868#endif
2869 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
2870 if (pVM->em.s.fIemExecutesAll)
2871 {
2872 Assert(rc != VINF_EM_RESCHEDULE_REM);
2873 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2874 Assert(rc != VINF_EM_RESCHEDULE_HM);
2875 }
2876 fFFDone = false;
2877 break;
2878 }
2879
2880 /*
2881                 * Execute in IEM, hoping we can quickly switch back to HM
2882 * or RAW execution. If our hopes fail, we go to REM.
2883 */
2884 case EMSTATE_IEM_THEN_REM:
2885 {
2886 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2887 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2888 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2889 break;
2890 }
2891
2892 /*
2893 * Application processor execution halted until SIPI.
2894 */
2895 case EMSTATE_WAIT_SIPI:
2896 /* no break */
2897 /*
2898 * hlt - execution halted until interrupt.
2899 */
2900 case EMSTATE_HALTED:
2901 {
2902 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2903                    /* If HM (or someone else) stores a pending interrupt in
2904                       TRPM, it must be dispatched ASAP without any halting.
2905                       Anything pending in TRPM has been accepted and the CPU
2906                       should already be in the right state to receive it. */
2907 if (TRPMHasTrap(pVCpu))
2908 rc = VINF_EM_RESCHEDULE;
2909 /* MWAIT has a special extension where it's woken up when
2910 an interrupt is pending even when IF=0. */
2911 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2912 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2913 {
2914 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2915 if (rc == VINF_SUCCESS)
2916 {
2917 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2918 APICUpdatePendingInterrupts(pVCpu);
2919
2920 if (VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2921 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2922 {
2923 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2924 rc = VINF_EM_RESCHEDULE;
2925 }
2926 }
2927 }
2928 else
2929 {
2930 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2931 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2932 check VMCPU_FF_UPDATE_APIC here. */
2933 if ( rc == VINF_SUCCESS
2934 && VMCPU_FF_IS_ANY_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2935 {
2936 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2937 rc = VINF_EM_RESCHEDULE;
2938 }
2939 }
2940
2941 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2942 break;
2943 }
2944
2945 /*
2946 * Suspended - return to VM.cpp.
2947 */
2948 case EMSTATE_SUSPENDED:
2949 TMR3NotifySuspend(pVM, pVCpu);
2950 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2951 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2952 return VINF_EM_SUSPEND;
2953
2954 /*
2955 * Debugging in the guest.
2956 */
2957 case EMSTATE_DEBUG_GUEST_RAW:
2958 case EMSTATE_DEBUG_GUEST_HM:
2959 case EMSTATE_DEBUG_GUEST_NEM:
2960 case EMSTATE_DEBUG_GUEST_IEM:
2961 case EMSTATE_DEBUG_GUEST_REM:
2962 TMR3NotifySuspend(pVM, pVCpu);
2963 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2964 TMR3NotifyResume(pVM, pVCpu);
2965 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2966 break;
2967
2968 /*
2969 * Debugging in the hypervisor.
2970 */
2971 case EMSTATE_DEBUG_HYPER:
2972 {
2973 TMR3NotifySuspend(pVM, pVCpu);
2974 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2975
2976 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2977 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2978 if (rc != VINF_SUCCESS)
2979 {
2980 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2981 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2982 else
2983 {
2984 /* switch to guru meditation mode */
2985 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2986 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2987 VMMR3FatalDump(pVM, pVCpu, rc);
2988 }
2989 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2990 return rc;
2991 }
2992
2993 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2994 TMR3NotifyResume(pVM, pVCpu);
2995 break;
2996 }
2997
2998 /*
2999 * Guru meditation takes place in the debugger.
3000 */
3001 case EMSTATE_GURU_MEDITATION:
3002 {
3003 TMR3NotifySuspend(pVM, pVCpu);
3004 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3005 VMMR3FatalDump(pVM, pVCpu, rc);
3006 emR3Debug(pVM, pVCpu, rc);
3007 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3008 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3009 return rc;
3010 }
3011
3012 /*
3013 * The states we don't expect here.
3014 */
3015 case EMSTATE_NONE:
3016 case EMSTATE_TERMINATING:
3017 default:
3018 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
3019 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3020 TMR3NotifySuspend(pVM, pVCpu);
3021 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3022 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3023 return VERR_EM_INTERNAL_ERROR;
3024 }
3025 } /* The Outer Main Loop */
3026 }
3027 else
3028 {
3029 /*
3030 * Fatal error.
3031 */
3032 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
3033 TMR3NotifySuspend(pVM, pVCpu);
3034 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3035 VMMR3FatalDump(pVM, pVCpu, rc);
3036 emR3Debug(pVM, pVCpu, rc);
3037 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3038 /** @todo change the VM state! */
3039 return rc;
3040 }
3041
3042 /* not reached */
3043}
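
One detail of the EMSTATE_HALTED handling above is worth spelling out: when MWAIT is armed with the break-on-IF=0 extension, any pending PIC/APIC interrupt (as well as NMI, SMI or an unhalt request) forces a reschedule even though EFLAGS.IF is clear, whereas a plain HLT only needs the explicit reschedule for NMI/SMI/UNHALT, since ordinary interrupts are picked up through the normal forced-action path when IF permits. A hedged sketch of that predicate, with hypothetical names rather than VirtualBox API:

/* Illustrative sketch: which pending events force a reschedule out of EMSTATE_HALTED
   after the halted wait returns. The booleans stand in for the FFs used above. */
#include <stdbool.h>

static bool emSketchHaltWakeup(bool fMWaitArmedBreakIF0, /* MWAIT with the IF=0 break extension  */
                               bool fApicOrPicPending,   /* VMCPU_FF_INTERRUPT_APIC / _PIC       */
                               bool fNmiSmiOrUnhalt)     /* VMCPU_FF_INTERRUPT_NMI/_SMI/_UNHALT  */
{
    if (fMWaitArmedBreakIF0)
        /* The MWAIT extension wakes on any pending interrupt, even with EFLAGS.IF clear. */
        return fApicOrPicPending || fNmiSmiOrUnhalt;
    /* Plain HLT: only NMI/SMI/UNHALT need the explicit reschedule at this point. */
    return fNmiSmiOrUnhalt;
}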
3044
3045/**
3046 * Notify EM of a state change (used by FTM)
3047 *
3048 * @param pVM The cross context VM structure.
3049 */
3050VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
3051{
3052 PVMCPU pVCpu = VMMGetCpu(pVM);
3053
3054 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
3055 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
3056 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
3057 return VINF_SUCCESS;
3058}
3059
3060/**
3061 * Notify EM of a state change (used by FTM)
3062 *
3063 * @param pVM The cross context VM structure.
3064 */
3065VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
3066{
3067 PVMCPU pVCpu = VMMGetCpu(pVM);
3068 EMSTATE enmCurState = pVCpu->em.s.enmState;
3069
3070 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
3071 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
3072 pVCpu->em.s.enmPrevState = enmCurState;
3073 return VINF_SUCCESS;
3074}