VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@73009

Last change on this file since 73009 was 72749, checked in by vboxsync, 7 years ago

VMM: Remove EM_NOTIFY_HM and related code unused since VirtualBox 2.1 or earlier.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 146.9 KB
1/* $Id: EM.cpp 72749 2018-06-29 07:57:05Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
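/*
 * A much-simplified sketch of the scheduling idea described above — not the
 * actual EMR3ExecuteVM code, which also handles forced actions, debugging
 * and error states (the per-mode handler signatures are assumed from the
 * emR3RemExecute declaration below):
 *
 *     for (;;)
 *     {
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_RAW: rc = emR3RawExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_HM:  rc = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
 *             case EMSTATE_REM: rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             ...
 *         }
 *         (process rc and pending forced actions, possibly switching enmState)
 *     }
 */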
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/vmm.h>
42#include <VBox/vmm/patm.h>
43#include <VBox/vmm/csam.h>
44#include <VBox/vmm/selm.h>
45#include <VBox/vmm/trpm.h>
46#include <VBox/vmm/iem.h>
47#include <VBox/vmm/nem.h>
48#include <VBox/vmm/iom.h>
49#include <VBox/vmm/dbgf.h>
50#include <VBox/vmm/pgm.h>
51#ifdef VBOX_WITH_REM
52# include <VBox/vmm/rem.h>
53#endif
54#include <VBox/vmm/apic.h>
55#include <VBox/vmm/tm.h>
56#include <VBox/vmm/mm.h>
57#include <VBox/vmm/ssm.h>
58#include <VBox/vmm/pdmapi.h>
59#include <VBox/vmm/pdmcritsect.h>
60#include <VBox/vmm/pdmqueue.h>
61#include <VBox/vmm/hm.h>
62#include <VBox/vmm/patm.h>
63#include "EMInternal.h"
64#include <VBox/vmm/vm.h>
65#include <VBox/vmm/uvm.h>
66#include <VBox/vmm/cpumdis.h>
67#include <VBox/dis.h>
68#include <VBox/disopcode.h>
69#include "VMMTracing.h"
70
71#include <iprt/asm.h>
72#include <iprt/string.h>
73#include <iprt/stream.h>
74#include <iprt/thread.h>
75
76
77/*********************************************************************************************************************************
78* Internal Functions *
79*********************************************************************************************************************************/
80static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
81static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
82#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
83static const char *emR3GetStateName(EMSTATE enmState);
84#endif
85static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
86#if defined(VBOX_WITH_REM) || defined(DEBUG)
87static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
88#endif
89static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
90
91
92/**
93 * Initializes the EM.
94 *
95 * @returns VBox status code.
96 * @param pVM The cross context VM structure.
97 */
98VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
99{
100 LogFlow(("EMR3Init\n"));
101 /*
102 * Assert alignment and sizes.
103 */
104 AssertCompileMemberAlignment(VM, em.s, 32);
105 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
106 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
107
108 /*
109 * Init the structure.
110 */
111 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
112 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
113 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
114
115 bool fEnabled;
116 int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
117 AssertLogRelRCReturn(rc, rc);
118 pVM->fRecompileUser = !fEnabled;
119
120 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
121 AssertLogRelRCReturn(rc, rc);
122 pVM->fRecompileSupervisor = !fEnabled;
123
124#ifdef VBOX_WITH_RAW_RING1
125 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
126 AssertLogRelRCReturn(rc, rc);
127#else
128 pVM->fRawRing1Enabled = false; /* Disabled by default. */
129#endif
130
131 rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
132 AssertLogRelRCReturn(rc, rc);
133
134 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
135 AssertLogRelRCReturn(rc, rc);
136 pVM->em.s.fGuruOnTripleFault = !fEnabled;
137 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
138 {
139 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
140 pVM->em.s.fGuruOnTripleFault = true;
141 }
142
143 LogRel(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
144 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
145
146 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
147 * Whether to try to correlate exit history in any context, detect hot spots,
148 * and try to optimize these using IEM if there are other exits close by.
149 * This overrides the context-specific settings. */
150 bool fExitOptimizationEnabled = true;
151 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
152 AssertLogRelRCReturn(rc, rc);
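/*
 * Sketch of how these /EM/ CFGM keys are typically set from the host side,
 * assuming the standard VBoxInternal/ extra-data to CFGM mapping ("MyVM" is
 * a placeholder VM name):
 *
 *     VBoxManage setextradata "MyVM" "VBoxInternal/EM/ExitOptimizationEnabled" 0
 *
 * The value is picked up by the queries here the next time the VM powers up.
 */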
153
154 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
155 * Whether to optimize exits in ring-0. Setting this to false will also disable
156 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
157 * capabilities of the host kernel, this optimization may be unavailable. */
158 bool fExitOptimizationEnabledR0 = true;
159 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
160 AssertLogRelRCReturn(rc, rc);
161 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
162
163 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
164 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
165 * hooks are in effect). */
166 /** @todo change the default to true here */
167 bool fExitOptimizationEnabledR0PreemptDisabled = true;
168 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
169 AssertLogRelRCReturn(rc, rc);
170 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
171
172 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
173 * Maximum number of instructions to let EMHistoryExec execute in one go. */
174 uint16_t cHistoryExecMaxInstructions = 8192;
175 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
176 AssertLogRelRCReturn(rc, rc);
177 if (cHistoryExecMaxInstructions < 16)
178 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
179
180 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
181 * Maximum number of instructions between exits during probing. */
182 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
183#ifdef RT_OS_WINDOWS
184 if (VM_IS_NEM_ENABLED(pVM))
185 cHistoryProbeMaxInstructionsWithoutExit = 32;
186#endif
187 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
188 cHistoryProbeMaxInstructionsWithoutExit);
189 AssertLogRelRCReturn(rc, rc);
190 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
191 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
192 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 16");
193
194 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
195 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
196 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
197 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
198 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
199 cHistoryProbeMinInstructions);
200 AssertLogRelRCReturn(rc, rc);
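/*
 * Worked example for the defaults above: with the HM default
 * cHistoryProbeMaxInstructionsWithoutExit = 24 the derived minimum is
 * (24 + 1) * 3 = 75; with the NEM default of 32 it is 99. The 0x5554 guard
 * keeps the product within uint16_t range: (0x5554 + 1) * 3 is exactly
 * 0xffff, so maximums of 0x5554 or more simply clamp the minimum to 0xffff.
 */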
201
202 for (VMCPUID i = 0; i < pVM->cCpus; i++)
203 {
204 pVM->aCpus[i].em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
205 pVM->aCpus[i].em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
206 pVM->aCpus[i].em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
207
208 pVM->aCpus[i].em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
209 pVM->aCpus[i].em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
210 pVM->aCpus[i].em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
211 }
212
213#ifdef VBOX_WITH_REM
214 /*
215 * Initialize the REM critical section.
216 */
217 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
218 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
219 AssertRCReturn(rc, rc);
220#endif
221
222 /*
223 * Saved state.
224 */
225 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
226 NULL, NULL, NULL,
227 NULL, emR3Save, NULL,
228 NULL, emR3Load, NULL);
229 if (RT_FAILURE(rc))
230 return rc;
231
232 for (VMCPUID i = 0; i < pVM->cCpus; i++)
233 {
234 PVMCPU pVCpu = &pVM->aCpus[i];
235
236 pVCpu->em.s.enmState = i == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
237 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
238 pVCpu->em.s.fForceRAW = false;
239 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
240 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
241
242#ifdef VBOX_WITH_RAW_MODE
243 if (VM_IS_RAW_MODE_ENABLED(pVM))
244 {
245 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
246 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
247 }
248#endif
249
250# define EM_REG_COUNTER(a, b, c) \
251 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
252 AssertRC(rc);
253
254# define EM_REG_COUNTER_USED(a, b, c) \
255 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
256 AssertRC(rc);
257
258# define EM_REG_PROFILE(a, b, c) \
259 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
260 AssertRC(rc);
261
262# define EM_REG_PROFILE_ADV(a, b, c) \
263 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
264 AssertRC(rc);
265
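/*
 * Expansion sketch for the macros above: a registration such as the
 * StatR3Hlt one below,
 *
 *     EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "...");
 *
 * becomes
 *
 *     rc = STAMR3RegisterF(pVM, &pStats->StatR3Hlt, STAMTYPE_COUNTER, STAMVISIBILITY_USED,
 *                          STAMUNIT_OCCURENCES, "...", "/EM/CPU%d/R3/Interpret/Success/Hlt", i);
 *     AssertRC(rc);
 *
 * i.e. the per-CPU loop index i is substituted for the %d in the sample name.
 */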
266 /*
267 * Statistics.
268 */
269#ifdef VBOX_WITH_STATISTICS
270 PEMSTATS pStats;
271 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
272 if (RT_FAILURE(rc))
273 return rc;
274
275 pVCpu->em.s.pStatsR3 = pStats;
276 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
277 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
278
279 EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
280 EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");
281
282 EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
283 EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");
284
285 EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
286 EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
287 EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
288 EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
289 EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
290 EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
291 EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
292 EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
293 EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
294 EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
295 EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
296 EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
297 EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
298 EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
299 EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
300 EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
301 EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
302 EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
303 EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
304 EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
305 EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
306 EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
307 EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
308 EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
309 EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
310 EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
311 EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
312 EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
313 EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
314 EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
315 EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
316 EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
317 EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
318 EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
319 EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
320 EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
321 EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
322 EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
323 EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
324 EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
325 EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
326 EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
327 EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
328 EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
329 EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
330 EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
331 EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
332 EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
333 EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
334 EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
335 EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
336 EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
337 EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
338 EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
339 EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
340 EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
341 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
342 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
343 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
344 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
345 EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
346 EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
347 EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
348 EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
349 EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
350 EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
351 EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
352 EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
353 EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
354 EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
355 EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
356 EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
357 EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
358 EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
359
360 EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
361 EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");
362
363 EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
364 EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
365 EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
366 EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
367 EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
368 EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
369 EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
370 EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
371 EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
372 EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
373 EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
374 EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
375 EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
376 EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
377 EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
378 EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
379 EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
380 EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
381 EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
382 EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
383 EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
384 EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
385 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
386 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
387 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
388 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
389 EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
390 EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
391 EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
392 EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
393 EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
394 EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
395 EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
396 EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
397 EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
398 EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
399 EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
400 EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
401 EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
402 EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
403 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
404 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
405 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
406 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
407 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
408 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
409 EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
410 EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
411 EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
412 EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
413 EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
414 EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
415
416 EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
417 EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
418 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
419 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
420 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
421 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
422 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
423 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
424 EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
425 EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
426 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
427 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
428 EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
429 EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
430 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
431 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
432 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
433 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
434 EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
435 EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
436 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
437 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
438 EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
439 EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
440 EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
441 EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
442 EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
443 EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
444
445 EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
446 EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
447 EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of prefix .");
448 EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of prefix .");
449
450 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
451 EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions end to IEM in ring-3.");
452 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
453 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sli instructions.");
454 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
455 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
456 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
457 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
458 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
459 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
460 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
461 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
462 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
463 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
464 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
465 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
466 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
467 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
468 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
469 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
470 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
471 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
472 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
473 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
474 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
475 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
476
477 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
478 pVCpu->em.s.pCliStatTree = 0;
479
480 /* these should be considered for release statistics. */
481 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
482 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
483 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%d/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
484 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%d/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
485 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%d/EM/HMExecuteCalled", "Number of times enmR3HMExecute is called.");
486 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
487 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
488 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%d/EM/NEMEnter", "Profiling NEM entry overhead.");
489#endif /* VBOX_WITH_STATISTICS */
490 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%d/EM/NEMExec", "Profiling NEM execution.");
491 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%d/EM/NEMExecuteCalled", "Number of times enmR3NEMExecute is called.");
492#ifdef VBOX_WITH_STATISTICS
493 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
494 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
495 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
496 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
497 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
498 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
499#endif /* VBOX_WITH_STATISTICS */
500
501 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
502 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
503 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
504 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
505 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
506
507 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
508
509 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
510 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", i);
511 AssertRC(rc);
512
513 /* History record statistics */
514 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
515 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", i);
516 AssertRC(rc);
517
518 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
519 {
520 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
521 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", i, iStep);
522 AssertRC(rc);
523 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
524 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", i, iStep);
525 AssertRC(rc);
526 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
527 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacements", i, iStep);
528 AssertRC(rc);
529 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
530 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", i, iStep);
531 AssertRC(rc);
532 }
533
534 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%d/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
535 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%d/ExitOpt/ExecSavedExit", "Net number of saved exits.");
536 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%d/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
537 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%d/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
538 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%d/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
539 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%d/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
540 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%d/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
541 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%d/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
542 }
543
544 emR3InitDbg(pVM);
545 return VINF_SUCCESS;
546}
547
548
549/**
550 * Called when a VM initialization stage is completed.
551 *
552 * @returns VBox status code.
553 * @param pVM The cross context VM structure.
554 * @param enmWhat The initialization state that was completed.
555 */
556VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
557{
558 if (enmWhat == VMINITCOMPLETED_RING0)
559 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
560 pVM->aCpus[0].em.s.fExitOptimizationEnabled, pVM->aCpus[0].em.s.fExitOptimizationEnabledR0,
561 pVM->aCpus[0].em.s.fExitOptimizationEnabledR0PreemptDisabled));
562 return VINF_SUCCESS;
563}
564
565
566/**
567 * Applies relocations to data and code managed by this
568 * component. This function will be called at init and
569 * whenever the VMM needs to relocate itself inside the GC.
570 *
571 * @param pVM The cross context VM structure.
572 */
573VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
574{
575 LogFlow(("EMR3Relocate\n"));
576 for (VMCPUID i = 0; i < pVM->cCpus; i++)
577 {
578 PVMCPU pVCpu = &pVM->aCpus[i];
579 if (pVCpu->em.s.pStatsR3)
580 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
581 }
582}
583
584
585/**
586 * Reset the EM state for a CPU.
587 *
588 * Called by EMR3Reset and hot plugging.
589 *
590 * @param pVCpu The cross context virtual CPU structure.
591 */
592VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
593{
594 /* Reset scheduling state. */
595 pVCpu->em.s.fForceRAW = false;
596 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
597
598 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
599 out of the HALTED state here so that enmPrevState doesn't end up as
600 HALTED when EMR3Execute returns. */
601 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
602 {
603 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
604 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
605 }
606}
607
608
609/**
610 * Reset notification.
611 *
612 * @param pVM The cross context VM structure.
613 */
614VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
615{
616 Log(("EMR3Reset: \n"));
617 for (VMCPUID i = 0; i < pVM->cCpus; i++)
618 EMR3ResetCpu(&pVM->aCpus[i]);
619}
620
621
622/**
623 * Terminates the EM.
624 *
625 * Termination means cleaning up and freeing all resources; the VM itself
626 * is at this point powered off or suspended.
627 *
628 * @returns VBox status code.
629 * @param pVM The cross context VM structure.
630 */
631VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
632{
633 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
634
635#ifdef VBOX_WITH_REM
636 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
637#else
638 RT_NOREF(pVM);
639#endif
640 return VINF_SUCCESS;
641}
642
643
644/**
645 * Execute state save operation.
646 *
647 * @returns VBox status code.
648 * @param pVM The cross context VM structure.
649 * @param pSSM SSM operation handle.
650 */
651static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
652{
653 for (VMCPUID i = 0; i < pVM->cCpus; i++)
654 {
655 PVMCPU pVCpu = &pVM->aCpus[i];
656
657 SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
658
659 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
660 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
661 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
662
663 /* Save mwait state. */
664 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
665 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
666 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
667 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
668 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
669 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
670 AssertRCReturn(rc, rc);
671 }
672 return VINF_SUCCESS;
673}
674
675
676/**
677 * Execute state load operation.
678 *
679 * @returns VBox status code.
680 * @param pVM The cross context VM structure.
681 * @param pSSM SSM operation handle.
682 * @param uVersion Data layout version.
683 * @param uPass The data pass.
684 */
685static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
686{
687 /*
688 * Validate version.
689 */
690 if ( uVersion > EM_SAVED_STATE_VERSION
691 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
692 {
693 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
694 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
695 }
696 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
697
698 /*
699 * Load the saved state.
700 */
701 for (VMCPUID i = 0; i < pVM->cCpus; i++)
702 {
703 PVMCPU pVCpu = &pVM->aCpus[i];
704
705 int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
706 if (RT_FAILURE(rc))
707 pVCpu->em.s.fForceRAW = false;
708 AssertRCReturn(rc, rc);
709
710 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
711 {
712 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
713 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
714 AssertRCReturn(rc, rc);
715 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
716
717 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
718 }
719 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
720 {
721 /* Load mwait state. */
722 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
723 AssertRCReturn(rc, rc);
724 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
725 AssertRCReturn(rc, rc);
726 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
727 AssertRCReturn(rc, rc);
728 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
729 AssertRCReturn(rc, rc);
730 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
731 AssertRCReturn(rc, rc);
732 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
733 AssertRCReturn(rc, rc);
734 }
735
736 Assert(!pVCpu->em.s.pCliStatTree);
737 }
738 return VINF_SUCCESS;
739}
740
741
742/**
743 * Argument packet for emR3SetExecutionPolicy.
744 */
745struct EMR3SETEXECPOLICYARGS
746{
747 EMEXECPOLICY enmPolicy;
748 bool fEnforce;
749};
750
751
752/**
753 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
754 */
755static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
756{
757 /*
758 * Only the first CPU changes the variables.
759 */
760 if (pVCpu->idCpu == 0)
761 {
762 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
763 switch (pArgs->enmPolicy)
764 {
765 case EMEXECPOLICY_RECOMPILE_RING0:
766 pVM->fRecompileSupervisor = pArgs->fEnforce;
767 break;
768 case EMEXECPOLICY_RECOMPILE_RING3:
769 pVM->fRecompileUser = pArgs->fEnforce;
770 break;
771 case EMEXECPOLICY_IEM_ALL:
772 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
773 break;
774 default:
775 AssertFailedReturn(VERR_INVALID_PARAMETER);
776 }
777 LogRel(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
778 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
779 }
780
781 /*
782 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
783 */
784 return pVCpu->em.s.enmState == EMSTATE_RAW
785 || pVCpu->em.s.enmState == EMSTATE_HM
786 || pVCpu->em.s.enmState == EMSTATE_NEM
787 || pVCpu->em.s.enmState == EMSTATE_IEM
788 || pVCpu->em.s.enmState == EMSTATE_REM
789 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
790 ? VINF_EM_RESCHEDULE
791 : VINF_SUCCESS;
792}
793
794
795/**
796 * Changes an execution scheduling policy parameter.
797 *
798 * This is used to enable or disable raw-mode / hardware-virtualization
799 * execution of user and supervisor code.
800 *
801 * @returns VINF_SUCCESS on success.
802 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
803 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
804 *
805 * @param pUVM The user mode VM handle.
806 * @param enmPolicy The scheduling policy to change.
807 * @param fEnforce Whether to enforce the policy or not.
808 */
809VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
810{
811 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
812 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
813 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
814
815 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
816 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
817}
818
819
820/**
821 * Queries an execution scheduling policy parameter.
822 *
823 * @returns VBox status code
824 * @param pUVM The user mode VM handle.
825 * @param enmPolicy The scheduling policy to query.
826 * @param pfEnforced Where to return the current value.
827 */
828VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
829{
830 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
831 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
832 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
833 PVM pVM = pUVM->pVM;
834 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
835
836 /* No need to bother EMTs with a query. */
837 switch (enmPolicy)
838 {
839 case EMEXECPOLICY_RECOMPILE_RING0:
840 *pfEnforced = pVM->fRecompileSupervisor;
841 break;
842 case EMEXECPOLICY_RECOMPILE_RING3:
843 *pfEnforced = pVM->fRecompileUser;
844 break;
845 case EMEXECPOLICY_IEM_ALL:
846 *pfEnforced = pVM->em.s.fIemExecutesAll;
847 break;
848 default:
849 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
850 }
851
852 return VINF_SUCCESS;
853}
854
855
856/**
857 * Queries the main execution engine of the VM.
858 *
859 * @returns VBox status code
860 * @param pUVM The user mode VM handle.
861 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
862 */
863VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
864{
865 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
866 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
867
868 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
869 PVM pVM = pUVM->pVM;
870 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
871
872 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
873 return VINF_SUCCESS;
874}
875
876
877/**
878 * Raise a fatal error.
879 *
880 * Safely terminates the VM with a full state report. This function
881 * never returns; it performs a longjmp instead.
882 *
883 * @param pVCpu The cross context virtual CPU structure.
884 * @param rc VBox status code.
885 */
886VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
887{
888 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
889 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
890}
891
892
893#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
894/**
895 * Gets the EM state name.
896 *
897 * @returns Pointer to a read-only state name.
898 * @param enmState The state.
899 */
900static const char *emR3GetStateName(EMSTATE enmState)
901{
902 switch (enmState)
903 {
904 case EMSTATE_NONE: return "EMSTATE_NONE";
905 case EMSTATE_RAW: return "EMSTATE_RAW";
906 case EMSTATE_HM: return "EMSTATE_HM";
907 case EMSTATE_IEM: return "EMSTATE_IEM";
908 case EMSTATE_REM: return "EMSTATE_REM";
909 case EMSTATE_HALTED: return "EMSTATE_HALTED";
910 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
911 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
912 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
913 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
914 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
915 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
916 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
917 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
918 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
919 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
920 case EMSTATE_NEM: return "EMSTATE_NEM";
921 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
922 default: return "Unknown!";
923 }
924}
925#endif /* LOG_ENABLED || VBOX_STRICT */
926
927
928/**
929 * Handle pending ring-3 I/O port write.
930 *
931 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
932 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
933 *
934 * @returns Strict VBox status code.
935 * @param pVM The cross context VM structure.
936 * @param pVCpu The cross context virtual CPU structure.
937 */
938VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
939{
940 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
941
942 /* Get and clear the pending data. */
943 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
944 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
945 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
946 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
947 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
948
949 /* Assert sanity. */
950 switch (cbValue)
951 {
952 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
953 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
954 case 4: break;
955 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
956 }
957 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
958
959 /* Do the work. */
960 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
961 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
962 if (IOM_SUCCESS(rcStrict))
963 {
964 pVCpu->cpum.GstCtx.rip += cbInstr;
965 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
966 }
967 return rcStrict;
968}
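
/*
 * Producer-side sketch (illustrative, assuming the EMRZSetPendingIoPortWrite()
 * declaration in em.h): how a ring-0/raw-mode exit handler defers an OUT that
 * it cannot complete in that context.
 *
 * @code
 *     // Ring-0 exit handler, unable to complete the port write there:
 *     VBOXSTRICTRC rcStrict = EMRZSetPendingIoPortWrite(pVCpu, uPort, cbInstr,
 *                                                       cbValue, uValue);
 *     // rcStrict is VINF_EM_PENDING_R3_IOPORT_WRITE; returning it to ring-3
 *     // routes execution to emR3ExecutePendingIoPortWrite() above.
 * @endcode
 */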
969
970
971/**
972 * Handle pending ring-3 I/O port read.
973 *
974 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
975 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
976 *
977 * @returns Strict VBox status code.
978 * @param pVM The cross context VM structure.
979 * @param pVCpu The cross context virtual CPU structure.
980 */
981VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
982{
983 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
984
985 /* Get and clear the pending data. */
986 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
987 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
988 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
989 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
990
991 /* Assert sanity. */
992 switch (cbValue)
993 {
994 case 1: break;
995 case 2: break;
996 case 4: break;
997 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
998 }
999 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* ASCII "READ" canary */, VERR_EM_INTERNAL_ERROR);
1000 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
1001
1002 /* Do the work. */
1003 uint32_t uValue = 0;
1004 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
1005 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
1006 if (IOM_SUCCESS(rcStrict))
1007 {
1008 if (cbValue == 4)
1009 pVCpu->cpum.GstCtx.rax = uValue;
1010 else if (cbValue == 2)
1011 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
1012 else
1013 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
1014 pVCpu->cpum.GstCtx.rip += cbInstr;
1015 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
1016 }
1017 return rcStrict;
1018}
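
/*
 * Producer-side sketch for the read case (illustrative, assuming the
 * EMRZSetPendingIoPortRead() declaration in em.h).  Note there is no value
 * parameter; the setter stores the 0x52454144 ("READ") canary checked above.
 *
 * @code
 *     VBOXSTRICTRC rcStrict = EMRZSetPendingIoPortRead(pVCpu, uPort, cbInstr, cbValue);
 *     // Returning VINF_EM_PENDING_R3_IOPORT_READ to ring-3 routes execution
 *     // to emR3ExecutePendingIoPortRead() above.
 * @endcode
 */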
1019
1020
1021/**
1022 * Debug loop.
1023 *
1024 * @returns VBox status code for EM.
1025 * @param pVM The cross context VM structure.
1026 * @param pVCpu The cross context virtual CPU structure.
1027 * @param rc Current EM VBox status code.
1028 */
1029static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1030{
1031 for (;;)
1032 {
1033 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
1034 const VBOXSTRICTRC rcLast = rc;
1035
1036 /*
1037 * Debug related RC.
1038 */
1039 switch (VBOXSTRICTRC_VAL(rc))
1040 {
1041 /*
1042 * Single step an instruction.
1043 */
1044 case VINF_EM_DBG_STEP:
1045 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
1046 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
1047 || pVCpu->em.s.fForceRAW /* paranoia */)
1048#ifdef VBOX_WITH_RAW_MODE
1049 rc = emR3RawStep(pVM, pVCpu);
1050#else
1051 AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
1052#endif
1053 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
1054 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
1055 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
1056 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
1057#ifdef VBOX_WITH_REM
1058 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
1059 rc = emR3RemStep(pVM, pVCpu);
1060#endif
1061 else
1062 {
1063 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
1064 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
1065 rc = VINF_EM_DBG_STEPPED;
1066 }
1067 break;
1068
1069 /*
1070 * Simple events: stepped, breakpoint, stop/assertion.
1071 */
1072 case VINF_EM_DBG_STEPPED:
1073 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
1074 break;
1075
1076 case VINF_EM_DBG_BREAKPOINT:
1077 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
1078 break;
1079
1080 case VINF_EM_DBG_STOP:
1081 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
1082 break;
1083
1084 case VINF_EM_DBG_EVENT:
1085 rc = DBGFR3EventHandlePending(pVM, pVCpu);
1086 break;
1087
1088 case VINF_EM_DBG_HYPER_STEPPED:
1089 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
1090 break;
1091
1092 case VINF_EM_DBG_HYPER_BREAKPOINT:
1093 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
1094 break;
1095
1096 case VINF_EM_DBG_HYPER_ASSERTION:
1097 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
1098 RTLogFlush(NULL);
1099 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
1100 break;
1101
1102 /*
1103 * Guru meditation.
1104 */
1105 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
1106 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
1107 break;
1108 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
1109 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
1110 break;
1111 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
1112 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
1113 break;
1114
1115 default: /** @todo don't use default for guru, but make special error codes! */
1116 {
1117 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
1118 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
1119 break;
1120 }
1121 }
1122
1123 /*
1124 * Process the result.
1125 */
1126 switch (VBOXSTRICTRC_VAL(rc))
1127 {
1128 /*
1129 * Continue the debugging loop.
1130 */
1131 case VINF_EM_DBG_STEP:
1132 case VINF_EM_DBG_STOP:
1133 case VINF_EM_DBG_EVENT:
1134 case VINF_EM_DBG_STEPPED:
1135 case VINF_EM_DBG_BREAKPOINT:
1136 case VINF_EM_DBG_HYPER_STEPPED:
1137 case VINF_EM_DBG_HYPER_BREAKPOINT:
1138 case VINF_EM_DBG_HYPER_ASSERTION:
1139 break;
1140
1141 /*
1142 * Resuming execution (in some form) has to be done here if we got
1143 * a hypervisor debug event.
1144 */
1145 case VINF_SUCCESS:
1146 case VINF_EM_RESUME:
1147 case VINF_EM_SUSPEND:
1148 case VINF_EM_RESCHEDULE:
1149 case VINF_EM_RESCHEDULE_RAW:
1150 case VINF_EM_RESCHEDULE_REM:
1151 case VINF_EM_HALT:
1152 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
1153 {
1154#ifdef VBOX_WITH_RAW_MODE
1155 rc = emR3RawResumeHyper(pVM, pVCpu);
1156 if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
1157 continue;
1158#else
1159 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
1160#endif
1161 }
1162 if (rc == VINF_SUCCESS)
1163 rc = VINF_EM_RESCHEDULE;
1164 return rc;
1165
1166 /*
1167 * The debugger isn't attached.
1168 * We'll simply turn the thing off since that's the easiest thing to do.
1169 */
1170 case VERR_DBGF_NOT_ATTACHED:
1171 switch (VBOXSTRICTRC_VAL(rcLast))
1172 {
1173 case VINF_EM_DBG_HYPER_STEPPED:
1174 case VINF_EM_DBG_HYPER_BREAKPOINT:
1175 case VINF_EM_DBG_HYPER_ASSERTION:
1176 case VERR_TRPM_PANIC:
1177 case VERR_TRPM_DONT_PANIC:
1178 case VERR_VMM_RING0_ASSERTION:
1179 case VERR_VMM_HYPER_CR3_MISMATCH:
1180 case VERR_VMM_RING3_CALL_DISABLED:
1181 return rcLast;
1182 }
1183 return VINF_EM_OFF;
1184
1185 /*
1186 * Status codes terminating the VM in one or another sense.
1187 */
1188 case VINF_EM_TERMINATE:
1189 case VINF_EM_OFF:
1190 case VINF_EM_RESET:
1191 case VINF_EM_NO_MEMORY:
1192 case VINF_EM_RAW_STALE_SELECTOR:
1193 case VINF_EM_RAW_IRET_TRAP:
1194 case VERR_TRPM_PANIC:
1195 case VERR_TRPM_DONT_PANIC:
1196 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
1197 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
1198 case VERR_VMM_RING0_ASSERTION:
1199 case VERR_VMM_HYPER_CR3_MISMATCH:
1200 case VERR_VMM_RING3_CALL_DISABLED:
1201 case VERR_INTERNAL_ERROR:
1202 case VERR_INTERNAL_ERROR_2:
1203 case VERR_INTERNAL_ERROR_3:
1204 case VERR_INTERNAL_ERROR_4:
1205 case VERR_INTERNAL_ERROR_5:
1206 case VERR_IPE_UNEXPECTED_STATUS:
1207 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1208 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1209 return rc;
1210
1211 /*
1212 * The rest is unexpected, and will keep us here.
1213 */
1214 default:
1215 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1216 break;
1217 }
1218 } /* debug for ever */
1219}
1220
1221
1222#if defined(VBOX_WITH_REM) || defined(DEBUG)
1223/**
1224 * Steps recompiled code.
1225 *
1226 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1227 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1228 *
1229 * @param pVM The cross context VM structure.
1230 * @param pVCpu The cross context virtual CPU structure.
1231 */
1232static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1233{
1234 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1235
1236# ifdef VBOX_WITH_REM
1237 EMRemLock(pVM);
1238
1239 /*
1240 * Switch to REM, step instruction, switch back.
1241 */
1242 int rc = REMR3State(pVM, pVCpu);
1243 if (RT_SUCCESS(rc))
1244 {
1245 rc = REMR3Step(pVM, pVCpu);
1246 REMR3StateBack(pVM, pVCpu);
1247 }
1248 EMRemUnlock(pVM);
1249
1250# else
1251 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1252# endif
1253
1254 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1255 return rc;
1256}
1257#endif /* VBOX_WITH_REM || DEBUG */
1258
1259
1260#ifdef VBOX_WITH_REM
1261/**
1262 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
1263 * critical section.
1264 *
1265 * @returns false - new fInREMState value.
1266 * @param pVM The cross context VM structure.
1267 * @param pVCpu The cross context virtual CPU structure.
1268 */
1269DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1270{
1271 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1272 REMR3StateBack(pVM, pVCpu);
1273 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1274
1275 EMRemUnlock(pVM);
1276 return false;
1277}
1278#endif
1279
1280
1281/**
1282 * Executes recompiled code.
1283 *
1284 * This function contains the recompiler version of the inner
1285 * execution loop (the outer loop being in EMR3ExecuteVM()).
1286 *
1287 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1288 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1289 *
1290 * @param pVM The cross context VM structure.
1291 * @param pVCpu The cross context virtual CPU structure.
1292 * @param pfFFDone Where to store an indicator telling whether or not
1293 * FFs were done before returning.
1294 *
1295 */
1296static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1297{
1298#ifdef LOG_ENABLED
1299 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1300
1301 if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1302 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1303 else
1304 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1305#endif
1306 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1307
1308#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1309 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1310 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1311 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1312#endif
1313
1314 /*
1315 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1316 * or the REM suggests raw-mode execution.
1317 */
1318 *pfFFDone = false;
1319#ifdef VBOX_WITH_REM
1320 bool fInREMState = false;
1321#else
1322 uint32_t cLoops = 0;
1323#endif
1324 int rc = VINF_SUCCESS;
1325 for (;;)
1326 {
1327#ifdef VBOX_WITH_REM
1328 /*
1329 * Lock REM and update the state if not already in sync.
1330 *
1331 * Note! Big lock, but you are not supposed to own any lock when
1332 * coming in here.
1333 */
1334 if (!fInREMState)
1335 {
1336 EMRemLock(pVM);
1337 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1338
1339 /* Flush the recompiler translation blocks if the VCPU has changed,
1340 also force a full CPU state resync. */
1341 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1342 {
1343 REMFlushTBs(pVM);
1344 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1345 }
1346 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1347
1348 rc = REMR3State(pVM, pVCpu);
1349
1350 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1351 if (RT_FAILURE(rc))
1352 break;
1353 fInREMState = true;
1354
1355 /*
1356 * We might have missed the raising of VMREQ, TIMER and some other
1357 * important FFs while we were busy switching the state. So, check again.
1358 */
1359 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1360 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1361 {
1362 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1363 goto l_REMDoForcedActions;
1364 }
1365 }
1366#endif
1367
1368 /*
1369 * Execute REM.
1370 */
1371 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1372 {
1373 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1374#ifdef VBOX_WITH_REM
1375 rc = REMR3Run(pVM, pVCpu);
1376#else
1377 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
1378#endif
1379 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1380 }
1381 else
1382 {
1383 /* Give up this time slice; virtual time continues */
1384 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1385 RTThreadSleep(5);
1386 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1387 rc = VINF_SUCCESS;
1388 }
1389
1390 /*
1391 * Deal with high priority post execution FFs before doing anything
1392 * else. Sync back the state and leave the lock to be on the safe side.
1393 */
1394 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1395 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1396 {
1397#ifdef VBOX_WITH_REM
1398 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1399#endif
1400 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1401 }
1402
1403 /*
1404 * Process the returned status code.
1405 */
1406 if (rc != VINF_SUCCESS)
1407 {
1408 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1409 break;
1410 if (rc != VINF_REM_INTERRUPED_FF)
1411 {
1412#ifndef VBOX_WITH_REM
1413 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1414 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1415 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1416 {
1417 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1418 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1419 {
1420 rc = VINF_EM_RESCHEDULE;
1421 break;
1422 }
1423 }
1424#endif
1425
1426 /*
1427 * Anything which is not known to us means an internal error
1428 * and the termination of the VM!
1429 */
1430 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1431 break;
1432 }
1433 }
1434
1435
1436 /*
1437 * Check and execute forced actions.
1438 *
1439 * Sync back the VM state and leave the lock before calling any of
1440 * these, you never know what's going to happen here.
1441 */
1442#ifdef VBOX_HIGH_RES_TIMERS_HACK
1443 TMTimerPollVoid(pVM, pVCpu);
1444#endif
1445 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1446 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1447 || VMCPU_FF_IS_PENDING(pVCpu,
1448 VMCPU_FF_ALL_REM_MASK
1449 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1450 {
1451#ifdef VBOX_WITH_REM
1452l_REMDoForcedActions:
1453 if (fInREMState)
1454 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1455#endif
1456 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1457 rc = emR3ForcedActions(pVM, pVCpu, rc);
1458 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1459 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1460 if ( rc != VINF_SUCCESS
1461 && rc != VINF_EM_RESCHEDULE_REM)
1462 {
1463 *pfFFDone = true;
1464 break;
1465 }
1466 }
1467
1468#ifndef VBOX_WITH_REM
1469 /*
1470 * Have to check if we can get back to fast execution mode every so often.
1471 */
1472 if (!(++cLoops & 7))
1473 {
1474 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
1475 if ( enmCheck != EMSTATE_REM
1476 && enmCheck != EMSTATE_IEM_THEN_REM)
1477 return VINF_EM_RESCHEDULE;
1478 }
1479#endif
1480
1481 } /* The Inner Loop, recompiled execution mode version. */
1482
1483
1484#ifdef VBOX_WITH_REM
1485 /*
1486 * Returning. Sync back the VM state if required.
1487 */
1488 if (fInREMState)
1489 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1490#endif
1491
1492 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1493 return rc;
1494}
1495
1496
1497#ifdef DEBUG
1498
1499int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1500{
1501 EMSTATE enmOldState = pVCpu->em.s.enmState;
1502
1503 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1504
1505 Log(("Single step BEGIN:\n"));
1506 for (uint32_t i = 0; i < cIterations; i++)
1507 {
1508 DBGFR3PrgStep(pVCpu);
1509 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1510 emR3RemStep(pVM, pVCpu);
1511 if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
1512 break;
1513 }
1514 Log(("Single step END:\n"));
1515 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1516 pVCpu->em.s.enmState = enmOldState;
1517 return VINF_EM_RESCHEDULE;
1518}
1519
1520#endif /* DEBUG */
1521
1522
1523/**
1524 * Try to execute the problematic code in IEM first, then fall back on REM if there
1525 * is too much of it or if IEM doesn't implement something.
1526 *
1527 * @returns Strict VBox status code from IEMExecLots.
1528 * @param pVM The cross context VM structure.
1529 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1530 * @param pfFFDone Force flags done indicator.
1531 *
1532 * @thread EMT(pVCpu)
1533 */
1534static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1535{
1536 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1537 *pfFFDone = false;
1538
1539 /*
1540 * Execute in IEM for a while.
1541 */
1542 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1543 {
1544 uint32_t cInstructions;
1545 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
1546 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1547 if (rcStrict != VINF_SUCCESS)
1548 {
1549 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1550 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1551 break;
1552
1553 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1554 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1555 return rcStrict;
1556 }
1557
1558 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1559 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1560 {
1561 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1562 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1563 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1564 pVCpu->em.s.enmState = enmNewState;
1565 return VINF_SUCCESS;
1566 }
1567
1568 /*
1569 * Check for pending actions.
1570 */
1571 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1572 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1573 return VINF_SUCCESS;
1574 }
1575
1576 /*
1577 * Switch to REM.
1578 */
1579 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1580 pVCpu->em.s.enmState = EMSTATE_REM;
1581 return VINF_SUCCESS;
1582}
1583
1584
1585/**
1586 * Decides whether to execute RAW, HM/NEM, IEM or REM.
1587 *
1588 * @returns new EM state
1589 * @param pVM The cross context VM structure.
1590 * @param pVCpu The cross context virtual CPU structure.
1591 */
1592EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1593{
1594 /*
1595 * When forcing raw-mode execution, things are simple.
1596 */
1597 if (pVCpu->em.s.fForceRAW)
1598 return EMSTATE_RAW;
1599
1600 /*
1601 * We stay in the wait for SIPI state unless explicitly told otherwise.
1602 */
1603 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1604 return EMSTATE_WAIT_SIPI;
1605
1606 /*
1607 * Execute everything in IEM?
1608 */
1609 if (pVM->em.s.fIemExecutesAll)
1610 return EMSTATE_IEM;
1611
1612 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1613 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1614 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1615
1616 X86EFLAGS EFlags = pVCpu->cpum.GstCtx.eflags;
1617 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1618 {
1619 if (EMIsHwVirtExecutionEnabled(pVM))
1620 {
1621 if (VM_IS_HM_ENABLED(pVM))
1622 {
1623 if (HMR3CanExecuteGuest(pVM, &pVCpu->cpum.GstCtx))
1624 return EMSTATE_HM;
1625 }
1626 else if (NEMR3CanExecuteGuest(pVM, pVCpu))
1627 return EMSTATE_NEM;
1628
1629 /*
1630 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1631 * turns off monitoring features essential for raw mode!
1632 */
1633 return EMSTATE_IEM_THEN_REM;
1634 }
1635 }
1636
1637 /*
1638 * Standard raw-mode:
1639 *
1640 * Here we only support 16 & 32 bit protected-mode ring-3 code with no I/O privileges,
1641 * or 32-bit protected-mode ring-0 code.
1642 *
1643 * The tests are ordered by the likelihood of being true during normal execution.
1644 */
1645 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1646 {
1647 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1648 return EMSTATE_REM;
1649 }
1650
1651# ifndef VBOX_RAW_V86
1652 if (EFlags.u32 & X86_EFL_VM) {
1653 Log2(("raw mode refused: VM_MASK\n"));
1654 return EMSTATE_REM;
1655 }
1656# endif
1657
1658 /** @todo check the X86_CR0_AM flag with respect to raw mode!!! We're probably not emulating it right! */
1659 uint32_t u32CR0 = pVCpu->cpum.GstCtx.cr0;
1660 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1661 {
1662 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1663 return EMSTATE_REM;
1664 }
1665
1666 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
1667 {
1668 uint32_t u32Dummy, u32Features;
1669
1670 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1671 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1672 return EMSTATE_REM;
1673 }
1674
1675 unsigned uSS = pVCpu->cpum.GstCtx.ss.Sel;
1676 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
1677 || (uSS & X86_SEL_RPL) == 3)
1678 {
1679 if (!EMIsRawRing3Enabled(pVM))
1680 return EMSTATE_REM;
1681
1682 if (!(EFlags.u32 & X86_EFL_IF))
1683 {
1684 Log2(("raw mode refused: IF (RawR3)\n"));
1685 return EMSTATE_REM;
1686 }
1687
1688 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1689 {
1690 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1691 return EMSTATE_REM;
1692 }
1693 }
1694 else
1695 {
1696 if (!EMIsRawRing0Enabled(pVM))
1697 return EMSTATE_REM;
1698
1699 if (EMIsRawRing1Enabled(pVM))
1700 {
1701 /* Only ring 0 and 1 supervisor code. */
1702 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1703 {
1704 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1705 return EMSTATE_REM;
1706 }
1707 }
1708 /* Only ring 0 supervisor code. */
1709 else if ((uSS & X86_SEL_RPL) != 0)
1710 {
1711 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1712 return EMSTATE_REM;
1713 }
1714
1715 // Let's start with pure 32 bits ring 0 code first
1716 /** @todo What's pure 32-bit mode? flat? */
1717 if ( !(pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1718 || !(pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
1719 {
1720 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1721 return EMSTATE_REM;
1722 }
1723
1724 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1725 if (!(u32CR0 & X86_CR0_WP))
1726 {
1727 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1728 return EMSTATE_REM;
1729 }
1730
1731# ifdef VBOX_WITH_RAW_MODE
1732 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pVCpu->cpum.GstCtx.eip))
1733 {
1734 Log2(("raw r0 mode forced: patch code\n"));
1735# ifdef VBOX_WITH_SAFE_STR
1736 Assert(pVCpu->cpum.GstCtx.tr.Sel);
1737# endif
1738 return EMSTATE_RAW;
1739 }
1740# endif /* VBOX_WITH_RAW_MODE */
1741
1742# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1743 if (!(EFlags.u32 & X86_EFL_IF))
1744 {
1745 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1746 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1747 return EMSTATE_REM;
1748 }
1749# endif
1750
1751# ifndef VBOX_WITH_RAW_RING1
1752 /** @todo still necessary??? */
1753 if (EFlags.Bits.u2IOPL != 0)
1754 {
1755 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1756 return EMSTATE_REM;
1757 }
1758# endif
1759 }
1760
1761 /*
1762 * Stale hidden selectors means raw-mode is unsafe (being very careful).
1763 */
1764 if (pVCpu->cpum.GstCtx.cs.fFlags & CPUMSELREG_FLAGS_STALE)
1765 {
1766 Log2(("raw mode refused: stale CS\n"));
1767 return EMSTATE_REM;
1768 }
1769 if (pVCpu->cpum.GstCtx.ss.fFlags & CPUMSELREG_FLAGS_STALE)
1770 {
1771 Log2(("raw mode refused: stale SS\n"));
1772 return EMSTATE_REM;
1773 }
1774 if (pVCpu->cpum.GstCtx.ds.fFlags & CPUMSELREG_FLAGS_STALE)
1775 {
1776 Log2(("raw mode refused: stale DS\n"));
1777 return EMSTATE_REM;
1778 }
1779 if (pVCpu->cpum.GstCtx.es.fFlags & CPUMSELREG_FLAGS_STALE)
1780 {
1781 Log2(("raw mode refused: stale ES\n"));
1782 return EMSTATE_REM;
1783 }
1784 if (pVCpu->cpum.GstCtx.fs.fFlags & CPUMSELREG_FLAGS_STALE)
1785 {
1786 Log2(("raw mode refused: stale FS\n"));
1787 return EMSTATE_REM;
1788 }
1789 if (pVCpu->cpum.GstCtx.gs.fFlags & CPUMSELREG_FLAGS_STALE)
1790 {
1791 Log2(("raw mode refused: stale GS\n"));
1792 return EMSTATE_REM;
1793 }
1794
1795# ifdef VBOX_WITH_SAFE_STR
1796 if (pVCpu->cpum.GstCtx.tr.Sel == 0)
1797 {
1798 Log(("Raw mode refused -> TR=0\n"));
1799 return EMSTATE_REM;
1800 }
1801# endif
1802
1803 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1804 return EMSTATE_RAW;
1805}
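
/*
 * Informal summary of the scheduling decision above (restates the code, adds
 * no new logic):
 *   1. fForceRAW                  -> EMSTATE_RAW
 *   2. waiting for SIPI           -> stay in EMSTATE_WAIT_SIPI
 *   3. fIemExecutesAll            -> EMSTATE_IEM
 *   4. no raw-mode: HM or NEM when they can handle the current guest state,
 *      otherwise EMSTATE_IEM_THEN_REM
 *   5. raw-mode: the suitability checks above; any failed check falls back
 *      to EMSTATE_REM.
 */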
1806
1807
1808/**
1809 * Executes all high priority post execution force actions.
1810 *
1811 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1812 * fatal error status code.
1813 *
1814 * @param pVM The cross context VM structure.
1815 * @param pVCpu The cross context virtual CPU structure.
1816 * @param rc The current strict VBox status code rc.
1817 */
1818VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1819{
1820 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1821
1822 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1823 PDMCritSectBothFF(pVCpu);
1824
1825 /* Update CR3 (Nested Paging case for HM). */
1826 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1827 {
1828 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1829 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1830 if (RT_FAILURE(rc2))
1831 return rc2;
1832 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1833 }
1834
1835 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1836 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1837 {
1838 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1839 if (CPUMIsGuestInPAEMode(pVCpu))
1840 {
1841 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1842 AssertPtr(pPdpes);
1843
1844 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1845 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1846 }
1847 else
1848 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1849 }
1850
1851 /* IEM has pending work (typically memory write after INS instruction). */
1852 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
1853 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1854
1855 /* IOM has pending work (committing an I/O or MMIO write). */
1856 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
1857 {
1858 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1859 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1860 { /* half likely, or at least it's a line shorter. */ }
1861 else if (rc == VINF_SUCCESS)
1862 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1863 else
1864 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1865 }
1866
1867#ifdef VBOX_WITH_RAW_MODE
1868 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1869 CSAMR3DoPendingAction(pVM, pVCpu);
1870#endif
1871
1872 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1873 {
1874 if ( rc > VINF_EM_NO_MEMORY
1875 && rc <= VINF_EM_LAST)
1876 rc = VINF_EM_NO_MEMORY;
1877 }
1878
1879 return rc;
1880}
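
/*
 * Usage sketch (illustrative): the inner execution loops invoke this helper
 * right after guest execution returns, before acting on the status code, as
 * emR3RemExecute() does above:
 *
 * @code
 *     if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
 *         || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
 *         rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
 * @endcode
 */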
1881
1882#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1883/**
1884 * Helper for emR3ForcedActions() for injecting interrupts into the
1885 * nested-guest.
1886 *
1887 * @returns VBox status code.
1888 * @param pVCpu The cross context virtual CPU structure.
1889 * @param pfResched Where to store whether a reschedule is required.
1890 * @param pfInject Where to store whether an interrupt was injected (and if
1891 * a wake up is pending).
1892 */
1893static int emR3NstGstInjectIntr(PVMCPU pVCpu, bool *pfResched, bool *pfInject)
1894{
1895 *pfResched = false;
1896 *pfInject = false;
1897 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
1898 {
1899 PVM pVM = pVCpu->CTX_SUFF(pVM);
1900 Assert(pVCpu->cpum.GstCtx.hwvirt.fGif);
1901 bool fVirtualGif = CPUMGetSvmNstGstVGif(&pVCpu->cpum.GstCtx);
1902#ifdef VBOX_WITH_RAW_MODE
1903 fVirtualGif &= !PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip);
1904#endif
1905 if (fVirtualGif)
1906 {
1907 if (CPUMCanSvmNstGstTakePhysIntr(pVCpu, &pVCpu->cpum.GstCtx))
1908 {
1909 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1910 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1911 {
1912 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1913 {
1914 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1915 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1916 if (RT_SUCCESS(rcStrict))
1917 {
1918 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1919 * doesn't intercept HLT but intercepts INTR? */
1920 *pfResched = true;
1921 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1922 if (rcStrict == VINF_SVM_VMEXIT)
1923 return VINF_SUCCESS;
1924 return VBOXSTRICTRC_VAL(rcStrict);
1925 }
1926
1927 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1928 return VINF_EM_TRIPLE_FAULT;
1929 }
1930
1931 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1932 /** @todo this really isn't nice, should properly handle this */
1933 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1934 int rc = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1935 Assert(rc != VINF_PGM_CHANGE_MODE);
1936 if (rc == VINF_SVM_VMEXIT)
1937 rc = VINF_SUCCESS;
1938 if (pVM->em.s.fIemExecutesAll && ( rc == VINF_EM_RESCHEDULE_REM
1939 || rc == VINF_EM_RESCHEDULE_HM
1940 || rc == VINF_EM_RESCHEDULE_RAW))
1941 {
1942 rc = VINF_EM_RESCHEDULE;
1943 }
1944
1945 *pfResched = true;
1946 *pfInject = true;
1947 return rc;
1948 }
1949 }
1950
1951 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1952 && CPUMCanSvmNstGstTakeVirtIntr(pVCpu, &pVCpu->cpum.GstCtx))
1953 {
1954 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1955 {
1956 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1957 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1958 if (RT_SUCCESS(rcStrict))
1959 {
1960 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1961 * doesn't intercept HLT but intercepts VINTR? */
1962 *pfResched = true;
1963 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1964 if (rcStrict == VINF_SVM_VMEXIT)
1965 return VINF_SUCCESS;
1966 return VBOXSTRICTRC_VAL(rcStrict);
1967 }
1968
1969 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1970 return VINF_EM_TRIPLE_FAULT;
1971 }
1972
1973 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1974 uint8_t const uNstGstVector = CPUMGetSvmNstGstInterrupt(&pVCpu->cpum.GstCtx);
1975 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR vector %#x\n", uNstGstVector));
1976 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1977 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1978
1979 *pfResched = true;
1980 *pfInject = true;
1981 return VINF_EM_RESCHEDULE;
1982 }
1983 }
1984 return VINF_SUCCESS;
1985 }
1986
1987 if (CPUMIsGuestInVmxNestedHwVirtMode(&pVCpu->cpum.GstCtx))
1988 { /** @todo Nested VMX. */ }
1989
1990 /* Shouldn't really get here. */
1991 AssertMsgFailed(("Unrecognized nested hwvirt. arch!\n"));
1992 return VERR_EM_INTERNAL_ERROR;
1993}
1994#endif
1995
1996/**
1997 * Executes all pending forced actions.
1998 *
1999 * Forced actions can cause execution delays and execution
2000 * rescheduling. The first we deal with using action priority, so
2001 * that for instance pending timers aren't scheduled and run until
2002 * right before execution. The rescheduling we deal with using
2003 * return codes. The same goes for VM termination, only in that case
2004 * we exit everything.
2005 *
2006 * @returns VBox status code of equal or greater importance/severity than rc.
2007 * The most important ones are: VINF_EM_RESCHEDULE,
2008 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
2009 *
2010 * @param pVM The cross context VM structure.
2011 * @param pVCpu The cross context virtual CPU structure.
2012 * @param rc The current rc.
2013 *
2014 */
2015int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
2016{
2017 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
2018#ifdef VBOX_STRICT
2019 int rcIrq = VINF_SUCCESS;
2020#endif
2021 int rc2;
2022#define UPDATE_RC() \
2023 do { \
2024 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
2025 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
2026 break; \
2027 if (!rc || rc2 < rc) \
2028 rc = rc2; \
2029 } while (0)
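    /* Worked example of the UPDATE_RC() merge (illustrative, assuming the
       err.h convention that numerically smaller VINF_EM codes are the more
       important ones):
           rc = VINF_EM_RESCHEDULE, rc2 = VINF_EM_SUSPEND    -> rc = VINF_EM_SUSPEND
           rc = VINF_EM_SUSPEND,    rc2 = VINF_EM_RESCHEDULE -> rc unchanged
           rc = VINF_SUCCESS,       rc2 = any VINF_EM code   -> rc = rc2
       Error statuses (rc < VINF_SUCCESS) are never overwritten. */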
2030 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
2031
2032 /*
2033 * Post execution chunk first.
2034 */
2035 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
2036 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
2037 {
2038 /*
2039 * EMT Rendezvous (must be serviced before termination).
2040 */
2041 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2042 {
2043 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2044 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2045 UPDATE_RC();
2046 /** @todo HACK ALERT! The following test is to make sure EM+TM
2047 * thinks the VM is stopped/reset before the next VM state change
2048 * is made. We need a better solution for this, or at least make it
2049 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2050 * VINF_EM_SUSPEND). */
2051 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2052 {
2053 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2054 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2055 return rc;
2056 }
2057 }
2058
2059 /*
2060 * State change request (cleared by vmR3SetStateLocked).
2061 */
2062 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2063 {
2064 VMSTATE enmState = VMR3GetState(pVM);
2065 switch (enmState)
2066 {
2067 case VMSTATE_FATAL_ERROR:
2068 case VMSTATE_FATAL_ERROR_LS:
2069 case VMSTATE_GURU_MEDITATION:
2070 case VMSTATE_GURU_MEDITATION_LS:
2071 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2072 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2073 return VINF_EM_SUSPEND;
2074
2075 case VMSTATE_DESTROYING:
2076 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2077 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2078 return VINF_EM_TERMINATE;
2079
2080 default:
2081 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2082 }
2083 }
2084
2085 /*
2086 * Debugger Facility polling.
2087 */
2088 if ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2089 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2090 {
2091 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2092 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2093 UPDATE_RC();
2094 }
2095
2096 /*
2097 * Postponed reset request.
2098 */
2099 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
2100 {
2101 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2102 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
2103 UPDATE_RC();
2104 }
2105
2106#ifdef VBOX_WITH_RAW_MODE
2107 /*
2108 * CSAM page scanning.
2109 */
2110 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2111 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
2112 {
2113 /** @todo check for 16 or 32 bits code! (D bit in the code selector) */
2114 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
2115 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2116 CSAMR3CheckCodeEx(pVM, &pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.eip);
2117 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
2118 }
2119#endif
2120
2121 /*
2122 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
2123 */
2124 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2125 {
2126 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2127 UPDATE_RC();
2128 if (rc == VINF_EM_NO_MEMORY)
2129 return rc;
2130 }
2131
2132 /* check that we got them all */
2133 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2134 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
2135 }
2136
2137 /*
2138 * Normal priority then.
2139 * (Executed in no particular order.)
2140 */
2141 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
2142 {
2143 /*
2144 * PDM Queues are pending.
2145 */
2146 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
2147 PDMR3QueueFlushAll(pVM);
2148
2149 /*
2150 * PDM DMA transfers are pending.
2151 */
2152 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
2153 PDMR3DmaRun(pVM);
2154
2155 /*
2156 * EMT Rendezvous (make sure they are handled before the requests).
2157 */
2158 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2159 {
2160 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2161 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2162 UPDATE_RC();
2163 /** @todo HACK ALERT! The following test is to make sure EM+TM
2164 * thinks the VM is stopped/reset before the next VM state change
2165 * is made. We need a better solution for this, or at least make it
2166 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2167 * VINF_EM_SUSPEND). */
2168 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2169 {
2170 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2171 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2172 return rc;
2173 }
2174 }
2175
2176 /*
2177 * Requests from other threads.
2178 */
2179 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
2180 {
2181 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2182 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
2183 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
2184 {
2185 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2186 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2187 return rc2;
2188 }
2189 UPDATE_RC();
2190 /** @todo HACK ALERT! The following test is to make sure EM+TM
2191 * thinks the VM is stopped/reset before the next VM state change
2192 * is made. We need a better solution for this, or at least make it
2193 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2194 * VINF_EM_SUSPEND). */
2195 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2196 {
2197 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2198 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2199 return rc;
2200 }
2201 }
2202
2203#ifdef VBOX_WITH_REM
2204 /* Replay the handler notification changes. */
2205 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
2206 {
2207 /* Try not to cause deadlocks. */
2208 if ( pVM->cCpus == 1
2209 || ( !PGMIsLockOwner(pVM)
2210 && !IOMIsLockWriteOwner(pVM))
2211 )
2212 {
2213 EMRemLock(pVM);
2214 REMR3ReplayHandlerNotifications(pVM);
2215 EMRemUnlock(pVM);
2216 }
2217 }
2218#endif
2219
2220 /* check that we got them all */
2221 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
2222 }
2223
2224 /*
2225 * Normal priority then. (per-VCPU)
2226 * (Executed in no particular order.)
2227 */
2228 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2229 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
2230 {
2231 /*
2232 * Requests from other threads.
2233 */
2234 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
2235 {
2236 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2237 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
2238 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
2239 {
2240 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2241 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2242 return rc2;
2243 }
2244 UPDATE_RC();
2245 /** @todo HACK ALERT! The following test is to make sure EM+TM
2246 * thinks the VM is stopped/reset before the next VM state change
2247 * is made. We need a better solution for this, or at least make it
2248 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2249 * VINF_EM_SUSPEND). */
2250 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2251 {
2252 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2253 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2254 return rc;
2255 }
2256 }
2257
2258 /* check that we got them all */
2259 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
2260 }
2261
2262 /*
2263 * High priority pre execution chunk last.
2264 * (Executed in ascending priority order.)
2265 */
2266 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
2267 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
2268 {
2269 /*
2270 * Timers before interrupts.
2271 */
2272 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
2273 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2274 TMR3TimerQueuesDo(pVM);
2275
2276 /*
2277 * Pick up asynchronously posted interrupts into the APIC.
2278 */
2279 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2280 APICUpdatePendingInterrupts(pVCpu);
2281
2282 /*
2283 * The instruction following an emulated STI should *always* be executed!
2284 *
2285 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
2286 * the eip is the same as the inhibited instr address. Before we
2287 * are able to execute this instruction in raw mode (iret to
2288 * guest code) an external interrupt might force a world switch
2289 * again. Possibly allowing a guest interrupt to be dispatched
2290 * in the process. This could break the guest. Sounds very
2291 * unlikely, but such timing-sensitive problems are not as rare as
2292 * you might think.
2293 */
2294 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2295 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2296 {
2297 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
2298 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
2299 {
2300 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
2301 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2302 }
2303 else
2304 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
2305 }
2306
2307 /*
2308 * Interrupts.
2309 */
2310 bool fWakeupPending = false;
2311 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2312 && (!rc || rc >= VINF_EM_RESCHEDULE_HM))
2313 {
2314 if ( !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2315 && !TRPMHasTrap(pVCpu)) /* an interrupt could already be scheduled for dispatching in the recompiler. */
2316 {
2317 Assert(!HMR3IsEventPending(pVCpu));
2318#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2319 if (CPUMIsGuestInNestedHwVirtMode(&pVCpu->cpum.GstCtx))
2320 {
2321 bool fResched, fInject;
2322 rc2 = emR3NstGstInjectIntr(pVCpu, &fResched, &fInject);
2323 if (fInject)
2324 {
2325 fWakeupPending = true;
2326# ifdef VBOX_STRICT
2327 rcIrq = rc2;
2328# endif
2329 }
2330 if (fResched)
2331 UPDATE_RC();
2332 }
2333 else
2334#endif
2335 {
2336 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
2337 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2338#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2339 && pVCpu->cpum.GstCtx.hwvirt.fGif
2340#endif
2341#ifdef VBOX_WITH_RAW_MODE
2342 && !PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip)
2343#endif
2344 && pVCpu->cpum.GstCtx.eflags.Bits.u1IF)
2345 {
2346 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2347 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
2348 /** @todo this really isn't nice, should properly handle this */
2349 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2350 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
2351 Log(("EM: TRPMR3InjectEvent -> %d\n", rc2));
2352 if (pVM->em.s.fIemExecutesAll && ( rc2 == VINF_EM_RESCHEDULE_REM
2353 || rc2 == VINF_EM_RESCHEDULE_HM
2354 || rc2 == VINF_EM_RESCHEDULE_RAW))
2355 {
2356 rc2 = VINF_EM_RESCHEDULE;
2357 }
2358#ifdef VBOX_STRICT
2359 rcIrq = rc2;
2360#endif
2361 UPDATE_RC();
2362 /* Reschedule required: We must not miss the wakeup below! */
2363 fWakeupPending = true;
2364 }
2365 }
2366 }
2367 }
2368
2369 /*
2370 * Allocate handy pages.
2371 */
2372 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2373 {
2374 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2375 UPDATE_RC();
2376 }
2377
2378 /*
2379 * Debugger Facility request.
2380 */
2381 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2382 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2383 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
2384 {
2385 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2386 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2387 UPDATE_RC();
2388 }
2389
2390 /*
2391 * EMT Rendezvous (must be serviced before termination).
2392 */
2393 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2394 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2395 {
2396 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2397 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2398 UPDATE_RC();
2399 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2400 * stopped/reset before the next VM state change is made. We need a better
2401 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2402 * && rc <= VINF_EM_SUSPEND). */
2403 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2404 {
2405 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2406 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2407 return rc;
2408 }
2409 }
2410
2411 /*
2412 * State change request (cleared by vmR3SetStateLocked).
2413 */
2414 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2415 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2416 {
2417 VMSTATE enmState = VMR3GetState(pVM);
2418 switch (enmState)
2419 {
2420 case VMSTATE_FATAL_ERROR:
2421 case VMSTATE_FATAL_ERROR_LS:
2422 case VMSTATE_GURU_MEDITATION:
2423 case VMSTATE_GURU_MEDITATION_LS:
2424 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2425 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2426 return VINF_EM_SUSPEND;
2427
2428 case VMSTATE_DESTROYING:
2429 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2430 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2431 return VINF_EM_TERMINATE;
2432
2433 default:
2434 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2435 }
2436 }
2437
2438 /*
2439 * Out of memory? Since most of our fellow high priority actions may cause us
2440 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2441 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2442 * than us since we can terminate without allocating more memory.
2443 */
2444 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2445 {
2446 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2447 UPDATE_RC();
2448 if (rc == VINF_EM_NO_MEMORY)
2449 return rc;
2450 }
2451
2452 /*
2453 * If the virtual sync clock is still stopped, make TM restart it.
2454 */
2455 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2456 TMR3VirtualSyncFF(pVM, pVCpu);
2457
2458#ifdef DEBUG
2459 /*
2460 * Debug, pause the VM.
2461 */
2462 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2463 {
2464 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2465 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2466 return VINF_EM_SUSPEND;
2467 }
2468#endif
2469
2470 /* check that we got them all */
2471 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2472 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2473 }
2474
2475#undef UPDATE_RC
2476 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2477 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2478 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2479 return rc;
2480}
2481
2482
2483/**
2484 * Check if the preset execution time cap restricts guest execution scheduling.
2485 *
2486 * @returns true if allowed, false otherwise
2487 * @param pVM The cross context VM structure.
2488 * @param pVCpu The cross context virtual CPU structure.
2489 */
2490bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2491{
2492 uint64_t u64UserTime, u64KernelTime;
2493
2494 if ( pVM->uCpuExecutionCap != 100
2495 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2496 {
2497 uint64_t u64TimeNow = RTTimeMilliTS();
2498 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2499 {
2500 /* New time slice. */
2501 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2502 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2503 pVCpu->em.s.u64TimeSliceExec = 0;
2504 }
2505 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2506
2507 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2508 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2509 return false;
2510 }
2511 return true;
2512}
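
/*
 * Worked example (illustrative, assuming the 100 ms EM_TIME_SLICE from
 * EMInternal.h): with uCpuExecutionCap = 50, the EMT may consume at most
 * 100 * 50 / 100 = 50 ms of kernel+user CPU time per 100 ms slice; once
 * u64TimeSliceExec reaches that budget the caller backs off, e.g.:
 *
 * @code
 *     if (!emR3IsExecutionAllowed(pVM, pVCpu))
 *         RTThreadSleep(5); // yield the rest of the slice, as emR3RemExecute does
 * @endcode
 */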
2513
2514
2515/**
2516 * Execute VM.
2517 *
2518 * This function is the main loop of the VM. The emulation thread
2519 * calls this function when the VM has been successfully constructed
2520 * and we're ready for executing the VM.
2521 *
2522 * Returning from this function means that the VM is turned off or
2523 * suspended (state already saved) and deconstruction is next in line.
2524 *
2525 * All interaction from other threads is done using forced actions
2526 * and signaling of the wait object.
2527 *
2528 * @returns VBox status code, informational status codes may indicate failure.
2529 * @param pVM The cross context VM structure.
2530 * @param pVCpu The cross context virtual CPU structure.
2531 */
2532VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2533{
2534 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2535 pVM,
2536 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2537 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2538 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2539 pVCpu->em.s.fForceRAW));
2540 VM_ASSERT_EMT(pVM);
2541 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2542 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2543 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2544 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2545
2546 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2547 if (rc == 0)
2548 {
2549 /*
2550 * Start the virtual time.
2551 */
2552 TMR3NotifyResume(pVM, pVCpu);
2553
2554 /*
2555 * The Outer Main Loop.
2556 */
2557 bool fFFDone = false;
2558
2559 /* Reschedule right away to start in the right state. */
2560 rc = VINF_SUCCESS;
2561
2562 /* If resuming after a pause or a state load, restore the previous
2563 state or else we'll start executing code. Else, just reschedule. */
2564 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2565 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2566 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2567 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2568 else
2569 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2570 pVCpu->em.s.cIemThenRemInstructions = 0;
2571 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2572
2573 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2574 for (;;)
2575 {
2576 /*
2577 * Before we can schedule anything (we're here because
2578 * scheduling is required) we must service any pending
2579 * forced actions to avoid any pending action causing
2580 * immediate rescheduling upon entering an inner loop.
2581 *
2582 * Do forced actions.
2583 */
2584 if ( !fFFDone
2585 && RT_SUCCESS(rc)
2586 && rc != VINF_EM_TERMINATE
2587 && rc != VINF_EM_OFF
2588 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2589 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2590 {
2591 rc = emR3ForcedActions(pVM, pVCpu, rc);
2592 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2593 if ( ( rc == VINF_EM_RESCHEDULE_REM
2594 || rc == VINF_EM_RESCHEDULE_HM)
2595 && pVCpu->em.s.fForceRAW)
2596 rc = VINF_EM_RESCHEDULE_RAW;
2597 }
2598 else if (fFFDone)
2599 fFFDone = false;
2600
2601 /*
2602 * Now what to do?
2603 */
2604 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2605 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2606 switch (rc)
2607 {
2608 /*
2609 * Keep doing what we're currently doing.
2610 */
2611 case VINF_SUCCESS:
2612 break;
2613
2614 /*
2615 * Reschedule - to raw-mode execution.
2616 */
2617/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2618 case VINF_EM_RESCHEDULE_RAW:
2619 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2620 if (VM_IS_RAW_MODE_ENABLED(pVM))
2621 {
2622 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2623 pVCpu->em.s.enmState = EMSTATE_RAW;
2624 }
2625 else
2626 {
2627 AssertLogRelFailed();
2628 pVCpu->em.s.enmState = EMSTATE_NONE;
2629 }
2630 break;
2631
2632 /*
2633 * Reschedule - to HM or NEM.
2634 */
2635 case VINF_EM_RESCHEDULE_HM:
2636 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2637 Assert(!pVCpu->em.s.fForceRAW);
2638 if (VM_IS_HM_ENABLED(pVM))
2639 {
2640 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2641 pVCpu->em.s.enmState = EMSTATE_HM;
2642 }
2643 else if (VM_IS_NEM_ENABLED(pVM))
2644 {
2645 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2646 pVCpu->em.s.enmState = EMSTATE_NEM;
2647 }
2648 else
2649 {
2650 AssertLogRelFailed();
2651 pVCpu->em.s.enmState = EMSTATE_NONE;
2652 }
2653 break;
2654
2655 /*
2656 * Reschedule - to recompiled execution.
2657 */
2658 case VINF_EM_RESCHEDULE_REM:
2659 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2660 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2661 {
2662 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2663 enmOldState, EMSTATE_IEM_THEN_REM));
2664 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2665 {
2666 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2667 pVCpu->em.s.cIemThenRemInstructions = 0;
2668 }
2669 }
2670 else
2671 {
2672 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2673 pVCpu->em.s.enmState = EMSTATE_REM;
2674 }
2675 break;
2676
2677 /*
2678 * Resume.
2679 */
2680 case VINF_EM_RESUME:
2681 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2682 /* Don't reschedule in the halted or wait for SIPI case. */
2683 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2684 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2685 {
2686 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2687 break;
2688 }
2689 /* fall through and get scheduled. */
2690 RT_FALL_THRU();
2691
2692 /*
2693 * Reschedule.
2694 */
2695 case VINF_EM_RESCHEDULE:
2696 {
2697 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2698 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2699 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2700 pVCpu->em.s.cIemThenRemInstructions = 0;
2701 pVCpu->em.s.enmState = enmState;
2702 break;
2703 }
2704
2705 /*
2706 * Halted.
2707 */
2708 case VINF_EM_HALT:
2709 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2710 pVCpu->em.s.enmState = EMSTATE_HALTED;
2711 break;
2712
2713 /*
2714              * Switch to the wait-for-SIPI state (application processors only).
2715 */
2716 case VINF_EM_WAIT_SIPI:
2717 Assert(pVCpu->idCpu != 0);
2718 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2719 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2720 break;
2721
2722
2723 /*
2724 * Suspend.
2725 */
2726 case VINF_EM_SUSPEND:
2727 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2728 Assert(enmOldState != EMSTATE_SUSPENDED);
2729 pVCpu->em.s.enmPrevState = enmOldState;
2730 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2731 break;
2732
2733 /*
2734 * Reset.
2735              * We might end up doing a double reset for now; we'll have to clean up the mess later.
2736 */
2737 case VINF_EM_RESET:
2738 {
2739 if (pVCpu->idCpu == 0)
2740 {
2741 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2742 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2743 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2744 pVCpu->em.s.cIemThenRemInstructions = 0;
2745 pVCpu->em.s.enmState = enmState;
2746 }
2747 else
2748 {
2749 /* All other VCPUs go into the wait for SIPI state. */
2750 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2751 }
2752 break;
2753 }
2754
2755 /*
2756 * Power Off.
2757 */
2758 case VINF_EM_OFF:
2759 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2760 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2761 TMR3NotifySuspend(pVM, pVCpu);
2762 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2763 return rc;
2764
2765 /*
2766 * Terminate the VM.
2767 */
2768 case VINF_EM_TERMINATE:
2769 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2770 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2771 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2772 TMR3NotifySuspend(pVM, pVCpu);
2773 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2774 return rc;
2775
2776
2777 /*
2778              * Out of memory: suspend the VM and raise a runtime error.
2779 */
2780 case VINF_EM_NO_MEMORY:
2781 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2782 Assert(enmOldState != EMSTATE_SUSPENDED);
2783 pVCpu->em.s.enmPrevState = enmOldState;
2784 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2785 TMR3NotifySuspend(pVM, pVCpu);
2786 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2787
2788 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2789 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2790 if (rc != VINF_EM_SUSPEND)
2791 {
2792 if (RT_SUCCESS_NP(rc))
2793 {
2794 AssertLogRelMsgFailed(("%Rrc\n", rc));
2795 rc = VERR_EM_INTERNAL_ERROR;
2796 }
2797 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2798 }
2799 return rc;
2800
2801 /*
2802 * Guest debug events.
2803 */
2804 case VINF_EM_DBG_STEPPED:
2805 case VINF_EM_DBG_STOP:
2806 case VINF_EM_DBG_EVENT:
2807 case VINF_EM_DBG_BREAKPOINT:
2808 case VINF_EM_DBG_STEP:
2809 if (enmOldState == EMSTATE_RAW)
2810 {
2811 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2812 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2813 }
2814 else if (enmOldState == EMSTATE_HM)
2815 {
2816 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2817 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2818 }
2819 else if (enmOldState == EMSTATE_NEM)
2820 {
2821 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2822 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2823 }
2824 else if (enmOldState == EMSTATE_REM)
2825 {
2826 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2827 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2828 }
2829 else
2830 {
2831 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2832 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2833 }
2834 break;
2835
2836 /*
2837 * Hypervisor debug events.
2838 */
2839 case VINF_EM_DBG_HYPER_STEPPED:
2840 case VINF_EM_DBG_HYPER_BREAKPOINT:
2841 case VINF_EM_DBG_HYPER_ASSERTION:
2842 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2843 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2844 break;
2845
2846 /*
2847 * Triple fault.
2848 */
2849 case VINF_EM_TRIPLE_FAULT:
2850 if (!pVM->em.s.fGuruOnTripleFault)
2851 {
2852 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2853 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2854 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2855 continue;
2856 }
2857 /* Else fall through and trigger a guru. */
2858 RT_FALL_THRU();
2859
2860 case VERR_VMM_RING0_ASSERTION:
2861 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2862 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2863 break;
2864
2865 /*
2866              * Any error code showing up here other than the ones we
2867              * know and process above is considered to be FATAL.
2868 *
2869 * Unknown warnings and informational status codes are also
2870 * included in this.
2871 */
2872 default:
2873 if (RT_SUCCESS_NP(rc))
2874 {
2875 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2876 rc = VERR_EM_INTERNAL_ERROR;
2877 }
2878 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2879 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2880 break;
2881 }
2882
2883 /*
2884 * Act on state transition.
2885 */
2886 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2887 if (enmOldState != enmNewState)
2888 {
2889 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2890
2891 /* Clear MWait flags and the unhalt FF. */
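                /* Added note: the halted state is over once we move to an
                   execution state, so any armed MWAIT condition and the
                   UNHALT force-flag are discarded here; presumably a stale
                   wake-up condition would otherwise cut the next halt short. */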
2892 if ( enmOldState == EMSTATE_HALTED
2893 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2894 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2895 && ( enmNewState == EMSTATE_RAW
2896 || enmNewState == EMSTATE_HM
2897 || enmNewState == EMSTATE_NEM
2898 || enmNewState == EMSTATE_REM
2899 || enmNewState == EMSTATE_IEM_THEN_REM
2900 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2901 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2902 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2903 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2904 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2905 {
2906 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2907 {
2908 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2909 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2910 }
2911 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2912 {
2913 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2914 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2915 }
2916 }
2917 }
2918 else
2919 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2920
2921 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2922 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2923
2924 /*
2925 * Act on the new state.
2926 */
2927 switch (enmNewState)
2928 {
2929 /*
2930 * Execute raw.
2931 */
2932 case EMSTATE_RAW:
2933#ifdef VBOX_WITH_RAW_MODE
2934 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2935#else
2936 AssertLogRelMsgFailed(("%Rrc\n", rc));
2937 rc = VERR_EM_INTERNAL_ERROR;
2938#endif
2939 break;
2940
2941 /*
2942 * Execute hardware accelerated raw.
2943 */
2944 case EMSTATE_HM:
2945 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2946 break;
2947
2948 /*
2949              * Execute using the native execution manager (NEM).
2950 */
2951 case EMSTATE_NEM:
2952 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2953 break;
2954
2955 /*
2956 * Execute recompiled.
2957 */
2958 case EMSTATE_REM:
2959 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2960 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2961 break;
2962
2963 /*
2964 * Execute in the interpreter.
2965 */
2966 case EMSTATE_IEM:
2967 {
2968#if 0 /* For testing purposes. */
2969 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2970 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2971 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2972 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2973 rc = VINF_SUCCESS;
2974 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2975#endif
2976 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
2977 if (pVM->em.s.fIemExecutesAll)
2978 {
2979 Assert(rc != VINF_EM_RESCHEDULE_REM);
2980 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2981 Assert(rc != VINF_EM_RESCHEDULE_HM);
2982 }
2983 fFFDone = false;
2984 break;
2985 }
2986
2987 /*
2988              * Execute in IEM, hoping we can quickly switch back to HM
2989 * or RAW execution. If our hopes fail, we go to REM.
2990 */
2991 case EMSTATE_IEM_THEN_REM:
2992 {
2993 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2994 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2995 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2996 break;
2997 }
2998
2999 /*
3000 * Application processor execution halted until SIPI.
3001 */
3002 case EMSTATE_WAIT_SIPI:
3003 /* no break */
3004 /*
3005 * hlt - execution halted until interrupt.
3006 */
3007 case EMSTATE_HALTED:
3008 {
3009 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
3010                 /* If HM (or someone else) stores a pending interrupt in
3011                    TRPM, it must be dispatched ASAP without any halting.
3012                    Anything pending in TRPM has been accepted and the CPU
3013                    should already be in the right state to receive it. */
3014 if (TRPMHasTrap(pVCpu))
3015 rc = VINF_EM_RESCHEDULE;
3016 /* MWAIT has a special extension where it's woken up when
3017 an interrupt is pending even when IF=0. */
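                    /* Added note: this presumably corresponds to MWAIT having
                       been executed with ECX[0] set (the interrupt-break-event
                       extension), recorded by EMMWAIT_FLAG_BREAKIRQIF0. */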
3018 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
3019 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
3020 {
3021 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
3022 if (rc == VINF_SUCCESS)
3023 {
3024 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3025 APICUpdatePendingInterrupts(pVCpu);
3026
3027 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
3028 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
3029 {
3030 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
3031 rc = VINF_EM_RESCHEDULE;
3032 }
3033 }
3034 }
3035 else
3036 {
3037 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
3038                     /* We're only interested in NMIs/SMIs here as they have their own FFs, so there's no
3039                        need to check VMCPU_FF_UPDATE_APIC here. */
3040 if ( rc == VINF_SUCCESS
3041 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
3042 {
3043 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
3044 rc = VINF_EM_RESCHEDULE;
3045 }
3046 }
3047
3048 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
3049 break;
3050 }
3051
3052 /*
3053 * Suspended - return to VM.cpp.
3054 */
3055 case EMSTATE_SUSPENDED:
3056 TMR3NotifySuspend(pVM, pVCpu);
3057 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3058 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3059 return VINF_EM_SUSPEND;
3060
3061 /*
3062 * Debugging in the guest.
3063 */
3064 case EMSTATE_DEBUG_GUEST_RAW:
3065 case EMSTATE_DEBUG_GUEST_HM:
3066 case EMSTATE_DEBUG_GUEST_NEM:
3067 case EMSTATE_DEBUG_GUEST_IEM:
3068 case EMSTATE_DEBUG_GUEST_REM:
3069 TMR3NotifySuspend(pVM, pVCpu);
3070 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
3071 TMR3NotifyResume(pVM, pVCpu);
3072 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
3073 break;
3074
3075 /*
3076 * Debugging in the hypervisor.
3077 */
3078 case EMSTATE_DEBUG_HYPER:
3079 {
3080 TMR3NotifySuspend(pVM, pVCpu);
3081 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3082
3083 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
3084 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
3085 if (rc != VINF_SUCCESS)
3086 {
3087 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
3088 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
3089 else
3090 {
3091 /* switch to guru meditation mode */
3092 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3093 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3094 VMMR3FatalDump(pVM, pVCpu, rc);
3095 }
3096 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3097 return rc;
3098 }
3099
3100 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
3101 TMR3NotifyResume(pVM, pVCpu);
3102 break;
3103 }
3104
3105 /*
3106 * Guru meditation takes place in the debugger.
3107 */
3108 case EMSTATE_GURU_MEDITATION:
3109 {
3110 TMR3NotifySuspend(pVM, pVCpu);
3111 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3112 VMMR3FatalDump(pVM, pVCpu, rc);
3113 emR3Debug(pVM, pVCpu, rc);
3114 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3115 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3116 return rc;
3117 }
3118
3119 /*
3120 * The states we don't expect here.
3121 */
3122 case EMSTATE_NONE:
3123 case EMSTATE_TERMINATING:
3124 default:
3125 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
3126 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3127 TMR3NotifySuspend(pVM, pVCpu);
3128 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3129 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3130 return VERR_EM_INTERNAL_ERROR;
3131 }
3132 } /* The Outer Main Loop */
3133 }
3134 else
3135 {
3136 /*
3137 * Fatal error.
3138 */
3139 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
3140 TMR3NotifySuspend(pVM, pVCpu);
3141 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3142 VMMR3FatalDump(pVM, pVCpu, rc);
3143 emR3Debug(pVM, pVCpu, rc);
3144 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3145 /** @todo change the VM state! */
3146 return rc;
3147 }
3148
3149 /* not reached */
3150}
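
/* Added illustration -- a hypothetical sketch (not part of this file) of how
   an EMT-side caller might drive EMR3ExecuteVM() and the exit statuses it
   returns above; the helper functions are made up for the example:

       for (;;)
       {
           int rc = EMR3ExecuteVM(pVM, pVCpu);     // runs until suspend/off/terminate/error
           if (rc == VINF_EM_SUSPEND)
               waitUntilResumed(pVM);              // hypothetical helper
           else if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
               break;                              // power off / terminate: leave the loop
           else
           {
               reportGuruMeditation(pVM, rc);      // hypothetical helper
               break;
           }
       }

   The real emulation thread lives elsewhere in VMM and handles more cases. */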
3151
3152/**
3153 * Notify EM of a suspend state change (used by FTM).
3154 *
3155 * @param pVM The cross context VM structure.
3156 */
3157VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
3158{
3159 PVMCPU pVCpu = VMMGetCpu(pVM);
3160
3161 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
3162 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
3163 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
3164 return VINF_SUCCESS;
3165}
3166
3167/**
3168 * Notify EM of a resume state change (used by FTM).
3169 *
3170 * @param pVM The cross context VM structure.
3171 */
3172VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
3173{
3174 PVMCPU pVCpu = VMMGetCpu(pVM);
3175 EMSTATE enmCurState = pVCpu->em.s.enmState;
3176
3177 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
3178 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
3179 pVCpu->em.s.enmPrevState = enmCurState;
3180 return VINF_SUCCESS;
3181}
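
/* Added illustration -- a hypothetical sketch of how the FTM suspend/resume
   notification pair above might bracket a synchronization step (the helper
   name is made up for the example):

       EMR3NotifySuspend(pVM);    // stop virtual time, remember current EM state
       ftmDoSyncWork(pVM);        // hypothetical helper doing the actual work
       EMR3NotifyResume(pVM);     // restore the EM state and resume virtual time
*/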