VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@ 72660

Last change on this file since 72660 was 72660, checked in by vboxsync, 7 years ago

EM: Made the EMHistoryExec parameters settable via CFGM. bugref:9198

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 147.4 KB
1/* $Id: EM.cpp 72660 2018-06-22 11:34:15Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
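/* A minimal sketch of the dispatch described above (illustrative only; the
 * real loop in EMR3ExecuteVM() handles many more states, forced-action
 * flags and status codes):
 *
 *      for (;;)
 *      {
 *          switch (pVCpu->em.s.enmState)
 *          {
 *              case EMSTATE_RAW: rc = emR3RawExecute(pVM, pVCpu, &fFFDone); break;
 *              case EMSTATE_HM:  rc = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
 *              case EMSTATE_REM: rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *              ...
 *          }
 *      }
 */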
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/vmm.h>
42#include <VBox/vmm/patm.h>
43#include <VBox/vmm/csam.h>
44#include <VBox/vmm/selm.h>
45#include <VBox/vmm/trpm.h>
46#include <VBox/vmm/iem.h>
47#include <VBox/vmm/nem.h>
48#include <VBox/vmm/iom.h>
49#include <VBox/vmm/dbgf.h>
50#include <VBox/vmm/pgm.h>
51#ifdef VBOX_WITH_REM
52# include <VBox/vmm/rem.h>
53#endif
54#include <VBox/vmm/apic.h>
55#include <VBox/vmm/tm.h>
56#include <VBox/vmm/mm.h>
57#include <VBox/vmm/ssm.h>
58#include <VBox/vmm/pdmapi.h>
59#include <VBox/vmm/pdmcritsect.h>
60#include <VBox/vmm/pdmqueue.h>
61#include <VBox/vmm/hm.h>
62#include <VBox/vmm/patm.h>
63#include "EMInternal.h"
64#include <VBox/vmm/vm.h>
65#include <VBox/vmm/uvm.h>
66#include <VBox/vmm/cpumdis.h>
67#include <VBox/dis.h>
68#include <VBox/disopcode.h>
69#include "VMMTracing.h"
70
71#include <iprt/asm.h>
72#include <iprt/string.h>
73#include <iprt/stream.h>
74#include <iprt/thread.h>
75
76
77/*********************************************************************************************************************************
78* Defined Constants And Macros *
79*********************************************************************************************************************************/
80#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
81#define EM_NOTIFY_HM
82#endif
83
84
85/*********************************************************************************************************************************
86* Internal Functions *
87*********************************************************************************************************************************/
88static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
89static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
90#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
91static const char *emR3GetStateName(EMSTATE enmState);
92#endif
93static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
94#if defined(VBOX_WITH_REM) || defined(DEBUG)
95static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
96#endif
97static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
98
99
100/**
101 * Initializes the EM.
102 *
103 * @returns VBox status code.
104 * @param pVM The cross context VM structure.
105 */
106VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
107{
108 LogFlow(("EMR3Init\n"));
109 /*
110 * Assert alignment and sizes.
111 */
112 AssertCompileMemberAlignment(VM, em.s, 32);
113 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
114 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
115
116 /*
117 * Init the structure.
118 */
119 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
120 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
121 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
122
123 bool fEnabled;
124 int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
125 AssertLogRelRCReturn(rc, rc);
126 pVM->fRecompileUser = !fEnabled;
127
128 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
129 AssertLogRelRCReturn(rc, rc);
130 pVM->fRecompileSupervisor = !fEnabled;
131
132#ifdef VBOX_WITH_RAW_RING1
133 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
134 AssertLogRelRCReturn(rc, rc);
135#else
136 pVM->fRawRing1Enabled = false; /* Disabled by default. */
137#endif
138
139 rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
140 AssertLogRelRCReturn(rc, rc);
141
142 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
143 AssertLogRelRCReturn(rc, rc);
144 pVM->em.s.fGuruOnTripleFault = !fEnabled;
145 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
146 {
147 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
148 pVM->em.s.fGuruOnTripleFault = true;
149 }
150
151 LogRel(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
152 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
153
154 /** @cfgm{/EM/ExitOptimizationEnabled, bool, true}
155 * Whether to try to correlate exit history in any context, detect hot spots,
156 * and try to optimize these using IEM if there are other exits close by. This
157 * overrides the context-specific settings. */
158 bool fExitOptimizationEnabled = true;
159 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
160 AssertLogRelRCReturn(rc, rc);
161
162 /** @cfgm{/EM/ExitOptimizationEnabledR0, bool, true}
163 * Whether to optimize exits in ring-0. Setting this to false will also disable
164 * the /EM/ExitOptimizationEnabledR0PreemptDisabled setting. Depending on preemption
165 * capabilities of the host kernel, this optimization may be unavailable. */
166 bool fExitOptimizationEnabledR0 = true;
167 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0", &fExitOptimizationEnabledR0, true);
168 AssertLogRelRCReturn(rc, rc);
169 fExitOptimizationEnabledR0 &= fExitOptimizationEnabled;
170
171 /** @cfgm{/EM/ExitOptimizationEnabledR0PreemptDisabled, bool, false}
172 * Whether to optimize exits in ring-0 when preemption is disabled (or preemption
173 * hooks are in effect). */
174 /** @todo change the default to true here */
175 bool fExitOptimizationEnabledR0PreemptDisabled = true;
176 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabledR0PreemptDisabled", &fExitOptimizationEnabledR0PreemptDisabled, false);
177 AssertLogRelRCReturn(rc, rc);
178 fExitOptimizationEnabledR0PreemptDisabled &= fExitOptimizationEnabledR0;
179
180 /** @cfgm{/EM/HistoryExecMaxInstructions, integer, 16, 65535, 8192}
181 * Maximum number of instructions to let EMHistoryExec execute in one go. */
182 uint16_t cHistoryExecMaxInstructions = 8192;
183 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryExecMaxInstructions", &cHistoryExecMaxInstructions, cHistoryExecMaxInstructions);
184 AssertLogRelRCReturn(rc, rc);
185 if (cHistoryExecMaxInstructions < 16)
186 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS, "/EM/HistoryExecMaxInstructions value is too small, min 16");
187
188 /** @cfgm{/EM/HistoryProbeMaxInstructionsWithoutExit, integer, 2, 65535, 24 for HM, 32 for NEM}
189 * Maximum number of instructions between exits during probing. */
190 uint16_t cHistoryProbeMaxInstructionsWithoutExit = 24;
191#ifdef RT_OS_WINDOWS
192 if (VM_IS_NEM_ENABLED(pVM))
193 cHistoryProbeMaxInstructionsWithoutExit = 32;
194#endif
195 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbeMaxInstructionsWithoutExit", &cHistoryProbeMaxInstructionsWithoutExit,
196 cHistoryProbeMaxInstructionsWithoutExit);
197 AssertLogRelRCReturn(rc, rc);
198 if (cHistoryProbeMaxInstructionsWithoutExit < 2)
199 return VMSetError(pVM, VERR_OUT_OF_RANGE, RT_SRC_POS,
200 "/EM/HistoryProbeMaxInstructionsWithoutExit value is too small, min 16");
201
202 /** @cfgm{/EM/HistoryProbMinInstructions, integer, 0, 65535, depends}
203 * The default is (/EM/HistoryProbeMaxInstructionsWithoutExit + 1) * 3. */
204 uint16_t cHistoryProbeMinInstructions = cHistoryProbeMaxInstructionsWithoutExit < 0x5554
205 ? (cHistoryProbeMaxInstructionsWithoutExit + 1) * 3 : 0xffff;
206 rc = CFGMR3QueryU16Def(pCfgEM, "HistoryProbMinInstructions", &cHistoryProbeMinInstructions,
207 cHistoryProbeMinInstructions);
208 AssertLogRelRCReturn(rc, rc);
209
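/* The exit optimization and history parameters above are regular CFGM keys,
 * so they can be set from the host side through the extradata path that is
 * mapped into the CFGM tree. A sketch (the VM name "myvm" and the values
 * are examples only):
 *
 *      VBoxManage setextradata "myvm" "VBoxInternal/EM/ExitOptimizationEnabled"      1
 *      VBoxManage setextradata "myvm" "VBoxInternal/EM/HistoryExecMaxInstructions" 4096
 *
 * Out-of-range values are rejected by the range checks above when the VM is
 * powered up. */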
210 for (VMCPUID i = 0; i < pVM->cCpus; i++)
211 {
212 pVM->aCpus[i].em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
213 pVM->aCpus[i].em.s.fExitOptimizationEnabledR0 = fExitOptimizationEnabledR0;
214 pVM->aCpus[i].em.s.fExitOptimizationEnabledR0PreemptDisabled = fExitOptimizationEnabledR0PreemptDisabled;
215
216 pVM->aCpus[i].em.s.cHistoryExecMaxInstructions = cHistoryExecMaxInstructions;
217 pVM->aCpus[i].em.s.cHistoryProbeMinInstructions = cHistoryProbeMinInstructions;
218 pVM->aCpus[i].em.s.cHistoryProbeMaxInstructionsWithoutExit = cHistoryProbeMaxInstructionsWithoutExit;
219 }
220
221#ifdef VBOX_WITH_REM
222 /*
223 * Initialize the REM critical section.
224 */
225 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
226 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
227 AssertRCReturn(rc, rc);
228#endif
229
230 /*
231 * Saved state.
232 */
233 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
234 NULL, NULL, NULL,
235 NULL, emR3Save, NULL,
236 NULL, emR3Load, NULL);
237 if (RT_FAILURE(rc))
238 return rc;
239
240 for (VMCPUID i = 0; i < pVM->cCpus; i++)
241 {
242 PVMCPU pVCpu = &pVM->aCpus[i];
243
244 pVCpu->em.s.enmState = i == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
245 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
246 pVCpu->em.s.fForceRAW = false;
247 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
248 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
249
250#ifdef VBOX_WITH_RAW_MODE
251 if (VM_IS_RAW_MODE_ENABLED(pVM))
252 {
253 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
254 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
255 }
256#endif
257
258# define EM_REG_COUNTER(a, b, c) \
259 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
260 AssertRC(rc);
261
262# define EM_REG_COUNTER_USED(a, b, c) \
263 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
264 AssertRC(rc);
265
266# define EM_REG_PROFILE(a, b, c) \
267 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
268 AssertRC(rc);
269
270# define EM_REG_PROFILE_ADV(a, b, c) \
271 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
272 AssertRC(rc);
273
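/* For example, EM_REG_COUNTER(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "...")
 * expands to a STAMR3RegisterF() call in which the trailing 'i' argument
 * fills in the %d, so each vCPU gets its own instance of the statistic
 * ("/EM/CPU0/...", "/EM/CPU1/...", and so on). */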
274 /*
275 * Statistics.
276 */
277#ifdef VBOX_WITH_STATISTICS
278 PEMSTATS pStats;
279 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
280 if (RT_FAILURE(rc))
281 return rc;
282
283 pVCpu->em.s.pStatsR3 = pStats;
284 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
285 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
286
287 EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
288 EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");
289
290 EM_REG_COUNTER(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
291 EM_REG_COUNTER(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");
292
293 EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
294 EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
295 EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
296 EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
297 EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
298 EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
299 EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
300 EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
301 EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
302 EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
303 EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
304 EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
305 EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
306 EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
307 EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
308 EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
309 EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
310 EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
311 EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
312 EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
313 EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
314 EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
315 EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
316 EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
317 EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
318 EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
319 EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
320 EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
321 EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
322 EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
323 EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
324 EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
325 EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
326 EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
327 EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
328 EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
329 EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
330 EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
331 EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
332 EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
333 EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
334 EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
335 EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
336 EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
337 EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
338 EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
339 EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
340 EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
341 EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
342 EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
343 EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
344 EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
345 EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
346 EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
347 EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
348 EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
349 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
350 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
351 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
352 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
353 EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
354 EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
355 EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
356 EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
357 EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
358 EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
359 EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
360 EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
361 EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
362 EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
363 EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
364 EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
365 EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
366 EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
367
368 EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
369 EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");
370
371 EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
372 EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
373 EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
374 EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
375 EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
376 EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
377 EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
378 EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
379 EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
380 EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
381 EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
382 EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
383 EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
384 EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
385 EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
386 EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
387 EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
388 EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
389 EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
390 EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
391 EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
392 EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
393 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
394 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
395 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
396 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
397 EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
398 EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
399 EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
400 EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
401 EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
402 EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
403 EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
404 EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
405 EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
406 EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
407 EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
408 EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
409 EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
410 EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
411 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
412 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
413 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
414 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
415 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
416 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
417 EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
418 EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
419 EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
420 EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
421 EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
422 EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
423
424 EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
425 EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
426 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
427 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
428 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
429 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
430 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
431 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
432 EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
433 EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
434 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
435 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
436 EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
437 EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
438 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
439 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
440 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
441 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
442 EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
443 EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
444 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
445 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
446 EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
447 EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
448 EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
449 EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
450 EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
451 EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
452
453 EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
454 EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
455 EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");
456 EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");
457
458 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
459 EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions sent to IEM in ring-3.");
460 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
461 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
462 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
463 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
464 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
465 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
466 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
467 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
468 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
469 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
470 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
471 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
472 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
473 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
474 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
475 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
476 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
477 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
478 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
479 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
480 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
481 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
482 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
483 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
484
485 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
486 pVCpu->em.s.pCliStatTree = 0;
487
488 /* these should be considered for release statistics. */
489 EM_REG_PROFILE(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
490 EM_REG_PROFILE(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
491 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%d/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
492 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%d/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
493 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%d/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
494 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
495 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
496 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%d/EM/NEMEnter", "Profiling NEM entry overhead.");
497#endif /* VBOX_WITH_STATISTICS */
498 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%d/EM/NEMExec", "Profiling NEM execution.");
499 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%d/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
500#ifdef VBOX_WITH_STATISTICS
501 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
502 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
503 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
504 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
505 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
506 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
507#endif /* VBOX_WITH_STATISTICS */
508
509 EM_REG_PROFILE(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
510 EM_REG_PROFILE(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
511 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
512 EM_REG_PROFILE(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
513 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
514
515 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
516
517 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
518 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", i);
519 AssertRC(rc);
520
521 /* History record statistics */
522 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
523 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", i);
524 AssertRC(rc);
525
526 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
527 {
528 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
529 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", i, iStep);
530 AssertRC(rc);
531 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
532 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", i, iStep);
533 AssertRC(rc);
534 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
535 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacements", i, iStep);
536 AssertRC(rc);
537 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
538 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", i, iStep);
539 AssertRC(rc);
540 }
541
542 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%d/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
543 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%d/ExitOpt/ExecSavedExit", "Net number of saved exits.");
544 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%d/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
545 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%d/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
546 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%d/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
547 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%d/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
548 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%d/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
549 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%d/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
550 }
551
552 emR3InitDbg(pVM);
553 return VINF_SUCCESS;
554}
555
556
557/**
558 * Called when a VM initialization stage is completed.
559 *
560 * @returns VBox status code.
561 * @param pVM The cross context VM structure.
562 * @param enmWhat The initialization state that was completed.
563 */
564VMMR3_INT_DECL(int) EMR3InitCompleted(PVM pVM, VMINITCOMPLETED enmWhat)
565{
566 if (enmWhat == VMINITCOMPLETED_RING0)
567 LogRel(("EM: Exit history optimizations: enabled=%RTbool enabled-r0=%RTbool enabled-r0-no-preemption=%RTbool\n",
568 pVM->aCpus[0].em.s.fExitOptimizationEnabled, pVM->aCpus[0].em.s.fExitOptimizationEnabledR0,
569 pVM->aCpus[0].em.s.fExitOptimizationEnabledR0PreemptDisabled));
570 return VINF_SUCCESS;
571}
572
573
574/**
575 * Applies relocations to data and code managed by this
576 * component. This function will be called at init and
577 * whenever the VMM needs to relocate itself inside the GC.
578 *
579 * @param pVM The cross context VM structure.
580 */
581VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
582{
583 LogFlow(("EMR3Relocate\n"));
584 for (VMCPUID i = 0; i < pVM->cCpus; i++)
585 {
586 PVMCPU pVCpu = &pVM->aCpus[i];
587 if (pVCpu->em.s.pStatsR3)
588 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
589 }
590}
591
592
593/**
594 * Reset the EM state for a CPU.
595 *
596 * Called by EMR3Reset and hot plugging.
597 *
598 * @param pVCpu The cross context virtual CPU structure.
599 */
600VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
601{
602 /* Reset scheduling state. */
603 pVCpu->em.s.fForceRAW = false;
604 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
605
606 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
607 out of the HALTED state here so that enmPrevState doesn't end up as
608 HALTED when EMR3Execute returns. */
609 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
610 {
611 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
612 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
613 }
614}
615
616
617/**
618 * Reset notification.
619 *
620 * @param pVM The cross context VM structure.
621 */
622VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
623{
624 Log(("EMR3Reset: \n"));
625 for (VMCPUID i = 0; i < pVM->cCpus; i++)
626 EMR3ResetCpu(&pVM->aCpus[i]);
627}
628
629
630/**
631 * Terminates the EM.
632 *
633 * Termination means cleaning up and freeing all resources;
634 * the VM itself is at this point powered off or suspended.
635 *
636 * @returns VBox status code.
637 * @param pVM The cross context VM structure.
638 */
639VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
640{
641 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
642
643#ifdef VBOX_WITH_REM
644 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
645#else
646 RT_NOREF(pVM);
647#endif
648 return VINF_SUCCESS;
649}
650
651
652/**
653 * Execute state save operation.
654 *
655 * @returns VBox status code.
656 * @param pVM The cross context VM structure.
657 * @param pSSM SSM operation handle.
658 */
659static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
660{
661 for (VMCPUID i = 0; i < pVM->cCpus; i++)
662 {
663 PVMCPU pVCpu = &pVM->aCpus[i];
664
665 SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
666
667 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
668 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
669 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
670
671 /* Save mwait state. */
672 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
673 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
674 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
675 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
676 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
677 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
678 AssertRCReturn(rc, rc);
679 }
680 return VINF_SUCCESS;
681}
682
683
684/**
685 * Execute state load operation.
686 *
687 * @returns VBox status code.
688 * @param pVM The cross context VM structure.
689 * @param pSSM SSM operation handle.
690 * @param uVersion Data layout version.
691 * @param uPass The data pass.
692 */
693static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
694{
695 /*
696 * Validate version.
697 */
698 if ( uVersion > EM_SAVED_STATE_VERSION
699 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
700 {
701 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
702 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
703 }
704 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
705
706 /*
707 * Load the saved state.
708 */
709 for (VMCPUID i = 0; i < pVM->cCpus; i++)
710 {
711 PVMCPU pVCpu = &pVM->aCpus[i];
712
713 int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
714 if (RT_FAILURE(rc))
715 pVCpu->em.s.fForceRAW = false;
716 AssertRCReturn(rc, rc);
717
718 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
719 {
720 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
721 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
722 AssertRCReturn(rc, rc);
723 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
724
725 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
726 }
727 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
728 {
729 /* Load mwait state. */
730 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
731 AssertRCReturn(rc, rc);
732 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
733 AssertRCReturn(rc, rc);
734 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
735 AssertRCReturn(rc, rc);
736 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
737 AssertRCReturn(rc, rc);
738 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
739 AssertRCReturn(rc, rc);
740 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
741 AssertRCReturn(rc, rc);
742 }
743
744 Assert(!pVCpu->em.s.pCliStatTree);
745 }
746 return VINF_SUCCESS;
747}
748
749
750/**
751 * Argument packet for emR3SetExecutionPolicy.
752 */
753struct EMR3SETEXECPOLICYARGS
754{
755 EMEXECPOLICY enmPolicy;
756 bool fEnforce;
757};
758
759
760/**
761 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
762 */
763static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
764{
765 /*
766 * Only the first CPU changes the variables.
767 */
768 if (pVCpu->idCpu == 0)
769 {
770 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
771 switch (pArgs->enmPolicy)
772 {
773 case EMEXECPOLICY_RECOMPILE_RING0:
774 pVM->fRecompileSupervisor = pArgs->fEnforce;
775 break;
776 case EMEXECPOLICY_RECOMPILE_RING3:
777 pVM->fRecompileUser = pArgs->fEnforce;
778 break;
779 case EMEXECPOLICY_IEM_ALL:
780 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
781 break;
782 default:
783 AssertFailedReturn(VERR_INVALID_PARAMETER);
784 }
785 LogRel(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
786 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
787 }
788
789 /*
790 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
791 */
792 return pVCpu->em.s.enmState == EMSTATE_RAW
793 || pVCpu->em.s.enmState == EMSTATE_HM
794 || pVCpu->em.s.enmState == EMSTATE_NEM
795 || pVCpu->em.s.enmState == EMSTATE_IEM
796 || pVCpu->em.s.enmState == EMSTATE_REM
797 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
798 ? VINF_EM_RESCHEDULE
799 : VINF_SUCCESS;
800}
801
802
803/**
804 * Changes an execution scheduling policy parameter.
805 *
806 * This is used to enable or disable raw-mode / hardware-virtualization
807 * execution of user and supervisor code.
808 *
809 * @returns VINF_SUCCESS on success.
810 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
811 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
812 *
813 * @param pUVM The user mode VM handle.
814 * @param enmPolicy The scheduling policy to change.
815 * @param fEnforce Whether to enforce the policy or not.
816 */
817VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
818{
819 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
820 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
821 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
822
823 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
824 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
825}
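/* Usage sketch (assumes a valid pUVM; error handling elided): forcing all
 * guest code through IEM, which is handy when hunting emulation bugs:
 *
 *      int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *      AssertLogRelRC(rc);
 *
 * The rendezvous above makes sure every EMT picks the change up before
 * guest execution resumes. */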
826
827
828/**
829 * Queries an execution scheduling policy parameter.
830 *
831 * @returns VBox status code
832 * @param pUVM The user mode VM handle.
833 * @param enmPolicy The scheduling policy to query.
834 * @param pfEnforced Where to return the current value.
835 */
836VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
837{
838 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
839 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
840 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
841 PVM pVM = pUVM->pVM;
842 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
843
844 /* No need to bother EMTs with a query. */
845 switch (enmPolicy)
846 {
847 case EMEXECPOLICY_RECOMPILE_RING0:
848 *pfEnforced = pVM->fRecompileSupervisor;
849 break;
850 case EMEXECPOLICY_RECOMPILE_RING3:
851 *pfEnforced = pVM->fRecompileUser;
852 break;
853 case EMEXECPOLICY_IEM_ALL:
854 *pfEnforced = pVM->em.s.fIemExecutesAll;
855 break;
856 default:
857 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
858 }
859
860 return VINF_SUCCESS;
861}
862
863
864/**
865 * Queries the main execution engine of the VM.
866 *
867 * @returns VBox status code
868 * @param pUVM The user mode VM handle.
869 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
870 */
871VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
872{
873 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
874 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
875
876 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
877 PVM pVM = pUVM->pVM;
878 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
879
880 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
881 return VINF_SUCCESS;
882}
883
884
885/**
886 * Raise a fatal error.
887 *
888 * Safely terminate the VM with full state report and stuff. This function
889 * will naturally never return.
890 *
891 * @param pVCpu The cross context virtual CPU structure.
892 * @param rc VBox status code.
893 */
894VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
895{
896 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
897 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
898}
899
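/* The matching setjmp is done by EMR3ExecuteVM() further down in this file;
 * roughly (illustrative only; emR3ExecuteLoop is a hypothetical stand-in
 * for the real state-machine loop):
 *
 *      int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
 *      if (!rc)
 *          rc = emR3ExecuteLoop(pVM, pVCpu);
 *      // otherwise rc is what EMR3FatalError() was handed; the EMT reports
 *      // the guru meditation and bails out.
 */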
900
901#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
902/**
903 * Gets the EM state name.
904 *
905 * @returns Pointer to a read-only state name.
906 * @param enmState The state.
907 */
908static const char *emR3GetStateName(EMSTATE enmState)
909{
910 switch (enmState)
911 {
912 case EMSTATE_NONE: return "EMSTATE_NONE";
913 case EMSTATE_RAW: return "EMSTATE_RAW";
914 case EMSTATE_HM: return "EMSTATE_HM";
915 case EMSTATE_IEM: return "EMSTATE_IEM";
916 case EMSTATE_REM: return "EMSTATE_REM";
917 case EMSTATE_HALTED: return "EMSTATE_HALTED";
918 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
919 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
920 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
921 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
922 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
923 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
924 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
925 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
926 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
927 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
928 case EMSTATE_NEM: return "EMSTATE_NEM";
929 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
930 default: return "Unknown!";
931 }
932}
933#endif /* LOG_ENABLED || VBOX_STRICT */
934
935
936/**
937 * Handle pending ring-3 I/O port write.
938 *
939 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
940 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
941 *
942 * @returns Strict VBox status code.
943 * @param pVM The cross context VM structure.
944 * @param pVCpu The cross context virtual CPU structure.
945 */
946VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
947{
948 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
949
950 /* Get and clear the pending data. */
951 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
952 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
953 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
954 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
955 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
956
957 /* Assert sanity. */
958 switch (cbValue)
959 {
960 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
961 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
962 case 4: break;
963 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
964 }
965 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
966
967 /* Do the work. */
968 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
969 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
970 if (IOM_SUCCESS(rcStrict))
971 {
972 pVCpu->cpum.GstCtx.rip += cbInstr;
973 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
974 }
975 return rcStrict;
976}
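
/*
 * Rough sketch of the ring-0 producer side this function pairs with
 * (illustrative only; the exact EMRZSetPendingIoPortWrite() parameter order
 * is an assumption here, see EMAll.cpp for the authoritative declaration):
 *
 *     // In ring-0: defer an OUT that can only be completed in ring-3.
 *     return EMRZSetPendingIoPortWrite(pVCpu, uPort, cbInstr, cbValue, uValue);
 *     // EM's loop then sees VINF_EM_PENDING_R3_IOPORT_WRITE and calls
 *     // emR3ExecutePendingIoPortWrite() above to perform the access.
 */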
977
978
979/**
980 * Handle pending ring-3 I/O port read.
981 *
982 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
983 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
984 *
985 * @returns Strict VBox status code.
986 * @param pVM The cross context VM structure.
987 * @param pVCpu The cross context virtual CPU structure.
988 */
989VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
990{
991 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
992
993 /* Get and clear the pending data. */
994 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
995 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
996 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
997 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
998
999 /* Assert sanity. */
1000 switch (cbValue)
1001 {
1002 case 1: break;
1003 case 2: break;
1004 case 4: break;
1005 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
1006 }
1007 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* "READ" */, VERR_EM_INTERNAL_ERROR);
1008 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
1009
1010 /* Do the work. */
1011 uint32_t uValue = 0;
1012 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
1013 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
1014 if (IOM_SUCCESS(rcStrict))
1015 {
1016 if (cbValue == 4)
1017 pVCpu->cpum.GstCtx.rax = uValue;
1018 else if (cbValue == 2)
1019 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
1020 else
1021 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
1022 pVCpu->cpum.GstCtx.rip += cbInstr;
1023 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
1024 }
1025 return rcStrict;
1026}
1027
1028
1029/**
1030 * Debug loop.
1031 *
1032 * @returns VBox status code for EM.
1033 * @param pVM The cross context VM structure.
1034 * @param pVCpu The cross context virtual CPU structure.
1035 * @param rc Current EM VBox status code.
1036 */
1037static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1038{
1039 for (;;)
1040 {
1041 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
1042 const VBOXSTRICTRC rcLast = rc;
1043
1044 /*
1045 * Debug related RC.
1046 */
1047 switch (VBOXSTRICTRC_VAL(rc))
1048 {
1049 /*
1050 * Single step an instruction.
1051 */
1052 case VINF_EM_DBG_STEP:
1053 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
1054 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
1055 || pVCpu->em.s.fForceRAW /* paranoia */)
1056#ifdef VBOX_WITH_RAW_MODE
1057 rc = emR3RawStep(pVM, pVCpu);
1058#else
1059 AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
1060#endif
1061 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
1062 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
1063 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
1064 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
1065#ifdef VBOX_WITH_REM
1066 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
1067 rc = emR3RemStep(pVM, pVCpu);
1068#endif
1069 else
1070 {
1071 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
1072 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
1073 rc = VINF_EM_DBG_STEPPED;
1074 }
1075 break;
1076
1077 /*
1078 * Simple events: stepped, breakpoint, stop/assertion.
1079 */
1080 case VINF_EM_DBG_STEPPED:
1081 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
1082 break;
1083
1084 case VINF_EM_DBG_BREAKPOINT:
1085 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
1086 break;
1087
1088 case VINF_EM_DBG_STOP:
1089 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
1090 break;
1091
1092 case VINF_EM_DBG_EVENT:
1093 rc = DBGFR3EventHandlePending(pVM, pVCpu);
1094 break;
1095
1096 case VINF_EM_DBG_HYPER_STEPPED:
1097 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
1098 break;
1099
1100 case VINF_EM_DBG_HYPER_BREAKPOINT:
1101 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
1102 break;
1103
1104 case VINF_EM_DBG_HYPER_ASSERTION:
1105 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
1106 RTLogFlush(NULL);
1107 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
1108 break;
1109
1110 /*
1111 * Guru meditation.
1112 */
1113 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
1114 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
1115 break;
1116 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
1117 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
1118 break;
1119 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
1120 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
1121 break;
1122
1123 default: /** @todo don't use default for guru, but make special errors code! */
1124 {
1125 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
1126 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
1127 break;
1128 }
1129 }
1130
1131 /*
1132 * Process the result.
1133 */
1134 switch (VBOXSTRICTRC_VAL(rc))
1135 {
1136 /*
1137 * Continue the debugging loop.
1138 */
1139 case VINF_EM_DBG_STEP:
1140 case VINF_EM_DBG_STOP:
1141 case VINF_EM_DBG_EVENT:
1142 case VINF_EM_DBG_STEPPED:
1143 case VINF_EM_DBG_BREAKPOINT:
1144 case VINF_EM_DBG_HYPER_STEPPED:
1145 case VINF_EM_DBG_HYPER_BREAKPOINT:
1146 case VINF_EM_DBG_HYPER_ASSERTION:
1147 break;
1148
1149 /*
1150 * Resuming execution (in some form) has to be done here if we got
1151 * a hypervisor debug event.
1152 */
1153 case VINF_SUCCESS:
1154 case VINF_EM_RESUME:
1155 case VINF_EM_SUSPEND:
1156 case VINF_EM_RESCHEDULE:
1157 case VINF_EM_RESCHEDULE_RAW:
1158 case VINF_EM_RESCHEDULE_REM:
1159 case VINF_EM_HALT:
1160 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
1161 {
1162#ifdef VBOX_WITH_RAW_MODE
1163 rc = emR3RawResumeHyper(pVM, pVCpu);
1164 if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
1165 continue;
1166#else
1167 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
1168#endif
1169 }
1170 if (rc == VINF_SUCCESS)
1171 rc = VINF_EM_RESCHEDULE;
1172 return rc;
1173
1174 /*
1175 * The debugger isn't attached.
1176 * We'll simply turn the thing off since that's the easiest thing to do.
1177 */
1178 case VERR_DBGF_NOT_ATTACHED:
1179 switch (VBOXSTRICTRC_VAL(rcLast))
1180 {
1181 case VINF_EM_DBG_HYPER_STEPPED:
1182 case VINF_EM_DBG_HYPER_BREAKPOINT:
1183 case VINF_EM_DBG_HYPER_ASSERTION:
1184 case VERR_TRPM_PANIC:
1185 case VERR_TRPM_DONT_PANIC:
1186 case VERR_VMM_RING0_ASSERTION:
1187 case VERR_VMM_HYPER_CR3_MISMATCH:
1188 case VERR_VMM_RING3_CALL_DISABLED:
1189 return rcLast;
1190 }
1191 return VINF_EM_OFF;
1192
1193 /*
1194 * Status codes terminating the VM in one or another sense.
1195 */
1196 case VINF_EM_TERMINATE:
1197 case VINF_EM_OFF:
1198 case VINF_EM_RESET:
1199 case VINF_EM_NO_MEMORY:
1200 case VINF_EM_RAW_STALE_SELECTOR:
1201 case VINF_EM_RAW_IRET_TRAP:
1202 case VERR_TRPM_PANIC:
1203 case VERR_TRPM_DONT_PANIC:
1204 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
1205 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
1206 case VERR_VMM_RING0_ASSERTION:
1207 case VERR_VMM_HYPER_CR3_MISMATCH:
1208 case VERR_VMM_RING3_CALL_DISABLED:
1209 case VERR_INTERNAL_ERROR:
1210 case VERR_INTERNAL_ERROR_2:
1211 case VERR_INTERNAL_ERROR_3:
1212 case VERR_INTERNAL_ERROR_4:
1213 case VERR_INTERNAL_ERROR_5:
1214 case VERR_IPE_UNEXPECTED_STATUS:
1215 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1216 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1217 return rc;
1218
1219 /*
1220 * The rest is unexpected, and will keep us here.
1221 */
1222 default:
1223 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1224 break;
1225 }
1226 } /* debug for ever */
1227}
1228
1229
1230#if defined(VBOX_WITH_REM) || defined(DEBUG)
1231/**
1232 * Steps recompiled code.
1233 *
1234 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1235 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1236 *
1237 * @param pVM The cross context VM structure.
1238 * @param pVCpu The cross context virtual CPU structure.
1239 */
1240static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1241{
1242 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1243
1244# ifdef VBOX_WITH_REM
1245 EMRemLock(pVM);
1246
1247 /*
1248 * Switch to REM, step instruction, switch back.
1249 */
1250 int rc = REMR3State(pVM, pVCpu);
1251 if (RT_SUCCESS(rc))
1252 {
1253 rc = REMR3Step(pVM, pVCpu);
1254 REMR3StateBack(pVM, pVCpu);
1255 }
1256 EMRemUnlock(pVM);
1257
1258# else
1259 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1260# endif
1261
1262 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1263 return rc;
1264}
1265#endif /* VBOX_WITH_REM || DEBUG */
1266
1267
1268#ifdef VBOX_WITH_REM
1269/**
1270 * emR3RemExecute helper that syncs the state back from REM and leave the REM
1271 * critical section.
1272 *
1273 * @returns false - new fInREMState value.
1274 * @param pVM The cross context VM structure.
1275 * @param pVCpu The cross context virtual CPU structure.
1276 */
1277DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1278{
1279 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1280 REMR3StateBack(pVM, pVCpu);
1281 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1282
1283 EMRemUnlock(pVM);
1284 return false;
1285}
1286#endif
1287
1288
1289/**
1290 * Executes recompiled code.
1291 *
1292 * This function contains the recompiler version of the inner
1293 * execution loop (the outer loop being in EMR3ExecuteVM()).
1294 *
1295 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1296 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1297 *
1298 * @param pVM The cross context VM structure.
1299 * @param pVCpu The cross context virtual CPU structure.
1300 * @param pfFFDone Where to store an indicator telling whether or not
1301 * FFs were done before returning.
1302 *
1303 */
1304static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1305{
1306#ifdef LOG_ENABLED
1307 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1308
1309 if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
1310 Log(("EMV86: %04X:%08X IF=%d\n", pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
1311 else
1312 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, (uint32_t)pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.eflags.u));
1313#endif
1314 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1315
1316#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1317 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1318 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1319 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1320#endif
1321
1322 /*
1323 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1324 * or the REM suggests raw-mode execution.
1325 */
1326 *pfFFDone = false;
1327#ifdef VBOX_WITH_REM
1328 bool fInREMState = false;
1329#else
1330 uint32_t cLoops = 0;
1331#endif
1332 int rc = VINF_SUCCESS;
1333 for (;;)
1334 {
1335#ifdef VBOX_WITH_REM
1336 /*
1337 * Lock REM and update the state if not already in sync.
1338 *
1339 * Note! Big lock, but you are not supposed to own any lock when
1340 * coming in here.
1341 */
1342 if (!fInREMState)
1343 {
1344 EMRemLock(pVM);
1345 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1346
1347 /* Flush the recompiler translation blocks if the VCPU has changed,
1348 and force a full CPU state resync. */
1349 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1350 {
1351 REMFlushTBs(pVM);
1352 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1353 }
1354 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1355
1356 rc = REMR3State(pVM, pVCpu);
1357
1358 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1359 if (RT_FAILURE(rc))
1360 break;
1361 fInREMState = true;
1362
1363 /*
1364 * We might have missed the raising of VMREQ, TIMER and some other
1365 * important FFs while we were busy switching the state. So, check again.
1366 */
1367 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1368 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1369 {
1370 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1371 goto l_REMDoForcedActions;
1372 }
1373 }
1374#endif
1375
1376 /*
1377 * Execute REM.
1378 */
1379 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1380 {
1381 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1382#ifdef VBOX_WITH_REM
1383 rc = REMR3Run(pVM, pVCpu);
1384#else
1385 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
1386#endif
1387 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1388 }
1389 else
1390 {
1391 /* Give up this time slice; virtual time continues */
1392 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1393 RTThreadSleep(5);
1394 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1395 rc = VINF_SUCCESS;
1396 }
1397
1398 /*
1399 * Deal with high priority post execution FFs before doing anything
1400 * else. Sync back the state and leave the lock to be on the safe side.
1401 */
1402 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1403 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1404 {
1405#ifdef VBOX_WITH_REM
1406 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1407#endif
1408 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1409 }
1410
1411 /*
1412 * Process the returned status code.
1413 */
1414 if (rc != VINF_SUCCESS)
1415 {
1416 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1417 break;
1418 if (rc != VINF_REM_INTERRUPED_FF)
1419 {
1420#ifndef VBOX_WITH_REM
1421 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1422 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1423 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1424 {
1425 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1426 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1427 {
1428 rc = VINF_EM_RESCHEDULE;
1429 break;
1430 }
1431 }
1432#endif
1433
1434 /*
1435 * Anything which is not known to us means an internal error
1436 * and the termination of the VM!
1437 */
1438 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1439 break;
1440 }
1441 }
1442
1443
1444 /*
1445 * Check and execute forced actions.
1446 *
1447 * Sync back the VM state and leave the lock before calling any of
1448 * these, you never know what's going to happen here.
1449 */
1450#ifdef VBOX_HIGH_RES_TIMERS_HACK
1451 TMTimerPollVoid(pVM, pVCpu);
1452#endif
1453 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1454 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1455 || VMCPU_FF_IS_PENDING(pVCpu,
1456 VMCPU_FF_ALL_REM_MASK
1457 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1458 {
1459#ifdef VBOX_WITH_REM
1460l_REMDoForcedActions:
1461 if (fInREMState)
1462 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1463#endif
1464 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1465 rc = emR3ForcedActions(pVM, pVCpu, rc);
1466 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1467 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1468 if ( rc != VINF_SUCCESS
1469 && rc != VINF_EM_RESCHEDULE_REM)
1470 {
1471 *pfFFDone = true;
1472 break;
1473 }
1474 }
1475
1476#ifndef VBOX_WITH_REM
1477 /*
1478 * Have to check if we can get back to fast execution mode every so often.
1479 */
1480 if (!(++cLoops & 7))
1481 {
1482 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu);
1483 if ( enmCheck != EMSTATE_REM
1484 && enmCheck != EMSTATE_IEM_THEN_REM)
1485 return VINF_EM_RESCHEDULE;
1486 }
1487#endif
1488
1489 } /* The Inner Loop, recompiled execution mode version. */
1490
1491
1492#ifdef VBOX_WITH_REM
1493 /*
1494 * Returning. Sync back the VM state if required.
1495 */
1496 if (fInREMState)
1497 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1498#endif
1499
1500 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1501 return rc;
1502}
1503
1504
1505#ifdef DEBUG
1506
1507int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1508{
1509 EMSTATE enmOldState = pVCpu->em.s.enmState;
1510
1511 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1512
1513 Log(("Single step BEGIN:\n"));
1514 for (uint32_t i = 0; i < cIterations; i++)
1515 {
1516 DBGFR3PrgStep(pVCpu);
1517 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1518 emR3RemStep(pVM, pVCpu);
1519 if (emR3Reschedule(pVM, pVCpu) != EMSTATE_REM)
1520 break;
1521 }
1522 Log(("Single step END:\n"));
1523 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1524 pVCpu->em.s.enmState = enmOldState;
1525 return VINF_EM_RESCHEDULE;
1526}
1527
1528#endif /* DEBUG */
1529
1530
1531/**
1532 * Try to execute the problematic code in IEM first, then fall back on REM if
1533 * there is too much of it or if IEM doesn't implement something.
1534 *
1535 * @returns Strict VBox status code from IEMExecLots.
1536 * @param pVM The cross context VM structure.
1537 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1538 * @param pfFFDone Force flags done indicator.
1539 *
1540 * @thread EMT(pVCpu)
1541 */
1542static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1543{
1544 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1545 *pfFFDone = false;
1546
1547 /*
1548 * Execute in IEM for a while.
1549 */
1550 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1551 {
1552 uint32_t cInstructions;
1553 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
1554 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1555 if (rcStrict != VINF_SUCCESS)
1556 {
1557 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1558 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1559 break;
1560
1561 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1562 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1563 return rcStrict;
1564 }
1565
1566 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu);
1567 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1568 {
1569 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1570 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1571 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1572 pVCpu->em.s.enmState = enmNewState;
1573 return VINF_SUCCESS;
1574 }
1575
1576 /*
1577 * Check for pending actions.
1578 */
1579 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1580 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1581 return VINF_SUCCESS;
1582 }
1583
1584 /*
1585 * Switch to REM.
1586 */
1587 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1588 pVCpu->em.s.enmState = EMSTATE_REM;
1589 return VINF_SUCCESS;
1590}
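
/*
 * Worked example for the IEM-to-REM hand-over above (figures are made up):
 * if IEMExecLots() retires batches of 400 instructions, the counter goes
 * 0 -> 400 -> 800 -> 1200; at 1200 the '< 1024' check fails, the loop exits
 * and the state switches to EMSTATE_REM. The counter is reset again when the
 * EMSTATE_IEM_THEN_REM state is (re-)entered in EMR3ExecuteVM().
 */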
1591
1592
1593/**
1594 * Decides whether to execute RAW, HWACC or REM.
1595 *
1596 * @returns new EM state
1597 * @param pVM The cross context VM structure.
1598 * @param pVCpu The cross context virtual CPU structure.
1599 */
1600EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu)
1601{
1602 /*
1603 * When forcing raw-mode execution, things are simple.
1604 */
1605 if (pVCpu->em.s.fForceRAW)
1606 return EMSTATE_RAW;
1607
1608 /*
1609 * We stay in the wait for SIPI state unless explicitly told otherwise.
1610 */
1611 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1612 return EMSTATE_WAIT_SIPI;
1613
1614 /*
1615 * Execute everything in IEM?
1616 */
1617 if (pVM->em.s.fIemExecutesAll)
1618 return EMSTATE_IEM;
1619
1620 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1621 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1622 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1623
1624 X86EFLAGS EFlags = pVCpu->cpum.GstCtx.eflags;
1625 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1626 {
1627 if (EMIsHwVirtExecutionEnabled(pVM))
1628 {
1629 if (VM_IS_HM_ENABLED(pVM))
1630 {
1631 if (HMR3CanExecuteGuest(pVM, &pVCpu->cpum.GstCtx))
1632 return EMSTATE_HM;
1633 }
1634 else if (NEMR3CanExecuteGuest(pVM, pVCpu))
1635 return EMSTATE_NEM;
1636
1637 /*
1638 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1639 * turns off monitoring features essential for raw mode!
1640 */
1641 return EMSTATE_IEM_THEN_REM;
1642 }
1643 }
1644
1645 /*
1646 * Standard raw-mode:
1647 *
1648 * Here we only support 16-bit and 32-bit protected mode ring-3 code without
1649 * I/O privileges, and 32-bit protected mode ring-0 code.
1650 *
1651 * The tests are ordered by the likelihood of being true during normal execution.
1652 */
1653 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1654 {
1655 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1656 return EMSTATE_REM;
1657 }
1658
1659# ifndef VBOX_RAW_V86
1660 if (EFlags.u32 & X86_EFL_VM) {
1661 Log2(("raw mode refused: VM_MASK\n"));
1662 return EMSTATE_REM;
1663 }
1664# endif
1665
1666 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1667 uint32_t u32CR0 = pVCpu->cpum.GstCtx.cr0;
1668 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1669 {
1670 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1671 return EMSTATE_REM;
1672 }
1673
1674 if (pVCpu->cpum.GstCtx.cr4 & X86_CR4_PAE)
1675 {
1676 uint32_t u32Dummy, u32Features;
1677
1678 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1679 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1680 return EMSTATE_REM;
1681 }
1682
1683 unsigned uSS = pVCpu->cpum.GstCtx.ss.Sel;
1684 if ( pVCpu->cpum.GstCtx.eflags.Bits.u1VM
1685 || (uSS & X86_SEL_RPL) == 3)
1686 {
1687 if (!EMIsRawRing3Enabled(pVM))
1688 return EMSTATE_REM;
1689
1690 if (!(EFlags.u32 & X86_EFL_IF))
1691 {
1692 Log2(("raw mode refused: IF (RawR3)\n"));
1693 return EMSTATE_REM;
1694 }
1695
1696 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1697 {
1698 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1699 return EMSTATE_REM;
1700 }
1701 }
1702 else
1703 {
1704 if (!EMIsRawRing0Enabled(pVM))
1705 return EMSTATE_REM;
1706
1707 if (EMIsRawRing1Enabled(pVM))
1708 {
1709 /* Only ring 0 and 1 supervisor code. */
1710 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1711 {
1712 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1713 return EMSTATE_REM;
1714 }
1715 }
1716 /* Only ring 0 supervisor code. */
1717 else if ((uSS & X86_SEL_RPL) != 0)
1718 {
1719 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1720 return EMSTATE_REM;
1721 }
1722
1723 // Let's start with pure 32 bits ring 0 code first
1724 /** @todo What's pure 32-bit mode? flat? */
1725 if ( !(pVCpu->cpum.GstCtx.ss.Attr.n.u1DefBig)
1726 || !(pVCpu->cpum.GstCtx.cs.Attr.n.u1DefBig))
1727 {
1728 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1729 return EMSTATE_REM;
1730 }
1731
1732 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1733 if (!(u32CR0 & X86_CR0_WP))
1734 {
1735 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1736 return EMSTATE_REM;
1737 }
1738
1739# ifdef VBOX_WITH_RAW_MODE
1740 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pVCpu->cpum.GstCtx.eip))
1741 {
1742 Log2(("raw r0 mode forced: patch code\n"));
1743# ifdef VBOX_WITH_SAFE_STR
1744 Assert(pVCpu->cpum.GstCtx.tr.Sel);
1745# endif
1746 return EMSTATE_RAW;
1747 }
1748# endif /* VBOX_WITH_RAW_MODE */
1749
1750# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1751 if (!(EFlags.u32 & X86_EFL_IF))
1752 {
1753 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1754 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1755 return EMSTATE_REM;
1756 }
1757# endif
1758
1759# ifndef VBOX_WITH_RAW_RING1
1760 /** @todo still necessary??? */
1761 if (EFlags.Bits.u2IOPL != 0)
1762 {
1763 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1764 return EMSTATE_REM;
1765 }
1766# endif
1767 }
1768
1769 /*
1770 * Stale hidden selectors means raw-mode is unsafe (being very careful).
1771 */
1772 if (pVCpu->cpum.GstCtx.cs.fFlags & CPUMSELREG_FLAGS_STALE)
1773 {
1774 Log2(("raw mode refused: stale CS\n"));
1775 return EMSTATE_REM;
1776 }
1777 if (pVCpu->cpum.GstCtx.ss.fFlags & CPUMSELREG_FLAGS_STALE)
1778 {
1779 Log2(("raw mode refused: stale SS\n"));
1780 return EMSTATE_REM;
1781 }
1782 if (pVCpu->cpum.GstCtx.ds.fFlags & CPUMSELREG_FLAGS_STALE)
1783 {
1784 Log2(("raw mode refused: stale DS\n"));
1785 return EMSTATE_REM;
1786 }
1787 if (pVCpu->cpum.GstCtx.es.fFlags & CPUMSELREG_FLAGS_STALE)
1788 {
1789 Log2(("raw mode refused: stale ES\n"));
1790 return EMSTATE_REM;
1791 }
1792 if (pVCpu->cpum.GstCtx.fs.fFlags & CPUMSELREG_FLAGS_STALE)
1793 {
1794 Log2(("raw mode refused: stale FS\n"));
1795 return EMSTATE_REM;
1796 }
1797 if (pVCpu->cpum.GstCtx.gs.fFlags & CPUMSELREG_FLAGS_STALE)
1798 {
1799 Log2(("raw mode refused: stale GS\n"));
1800 return EMSTATE_REM;
1801 }
1802
1803# ifdef VBOX_WITH_SAFE_STR
1804 if (pVCpu->cpum.GstCtx.tr.Sel == 0)
1805 {
1806 Log(("Raw mode refused -> TR=0\n"));
1807 return EMSTATE_REM;
1808 }
1809# endif
1810
1811 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1812 return EMSTATE_RAW;
1813}
1814
1815
1816/**
1817 * Executes all high priority post execution forced actions.
1818 *
1819 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1820 * fatal error status code.
1821 *
1822 * @param pVM The cross context VM structure.
1823 * @param pVCpu The cross context virtual CPU structure.
1824 * @param rc The current strict VBox status code rc.
1825 */
1826VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1827{
1828 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1829
1830 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1831 PDMCritSectBothFF(pVCpu);
1832
1833 /* Update CR3 (Nested Paging case for HM). */
1834 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1835 {
1836 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1837 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1838 if (RT_FAILURE(rc2))
1839 return rc2;
1840 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1841 }
1842
1843 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1844 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1845 {
1846 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1847 if (CPUMIsGuestInPAEMode(pVCpu))
1848 {
1849 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1850 AssertPtr(pPdpes);
1851
1852 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1853 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1854 }
1855 else
1856 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1857 }
1858
1859 /* IEM has pending work (typically memory write after INS instruction). */
1860 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
1861 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1862
1863 /* IOM has pending work (committing an I/O or MMIO write). */
1864 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
1865 {
1866 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1867 if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
1868 { /* half likely, or at least it's a line shorter. */ }
1869 else if (rc == VINF_SUCCESS)
1870 rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
1871 else
1872 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
1873 }
1874
1875#ifdef VBOX_WITH_RAW_MODE
1876 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1877 CSAMR3DoPendingAction(pVM, pVCpu);
1878#endif
1879
1880 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1881 {
1882 if ( rc > VINF_EM_NO_MEMORY
1883 && rc <= VINF_EM_LAST)
1884 rc = VINF_EM_NO_MEMORY;
1885 }
1886
1887 return rc;
1888}
1889
1890#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1891/**
1892 * Helper for emR3ForcedActions() for injecting interrupts into the
1893 * nested-guest.
1894 *
1895 * @returns VBox status code.
1896 * @param pVCpu The cross context virtual CPU structure.
1897 * @param pfResched Where to store whether a reschedule is required.
1898 * @param pfInject Where to store whether an interrupt was injected (and if
1899 * a wake up is pending).
1900 */
1901static int emR3NstGstInjectIntr(PVMCPU pVCpu, bool *pfResched, bool *pfInject)
1902{
1903 *pfResched = false;
1904 *pfInject = false;
1905 if (CPUMIsGuestInSvmNestedHwVirtMode(&pVCpu->cpum.GstCtx))
1906 {
1907 PVM pVM = pVCpu->CTX_SUFF(pVM);
1908 Assert(pVCpu->cpum.GstCtx.hwvirt.fGif);
1909 bool fVirtualGif = CPUMGetSvmNstGstVGif(&pVCpu->cpum.GstCtx);
1910#ifdef VBOX_WITH_RAW_MODE
1911 fVirtualGif &= !PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip);
1912#endif
1913 if (fVirtualGif)
1914 {
1915 if (CPUMCanSvmNstGstTakePhysIntr(pVCpu, &pVCpu->cpum.GstCtx))
1916 {
1917 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1918 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1919 {
1920 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_INTR))
1921 {
1922 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1923 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1924 if (RT_SUCCESS(rcStrict))
1925 {
1926 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1927 * doesn't intercept HLT but intercepts INTR? */
1928 *pfResched = true;
1929 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1930 if (rcStrict == VINF_SVM_VMEXIT)
1931 return VINF_SUCCESS;
1932 return VBOXSTRICTRC_VAL(rcStrict);
1933 }
1934
1935 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1936 return VINF_EM_TRIPLE_FAULT;
1937 }
1938
1939 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1940 /** @todo this really isn't nice, should properly handle this */
1941 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
1942 int rc = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1943 Assert(rc != VINF_PGM_CHANGE_MODE);
1944 if (rc == VINF_SVM_VMEXIT)
1945 rc = VINF_SUCCESS;
1946 if (pVM->em.s.fIemExecutesAll && ( rc == VINF_EM_RESCHEDULE_REM
1947 || rc == VINF_EM_RESCHEDULE_HM
1948 || rc == VINF_EM_RESCHEDULE_RAW))
1949 {
1950 rc = VINF_EM_RESCHEDULE;
1951 }
1952
1953 *pfResched = true;
1954 *pfInject = true;
1955 return rc;
1956 }
1957 }
1958
1959 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1960 && CPUMCanSvmNstGstTakeVirtIntr(pVCpu, &pVCpu->cpum.GstCtx))
1961 {
1962 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, &pVCpu->cpum.GstCtx, SVM_CTRL_INTERCEPT_VINTR))
1963 {
1964 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_SVM_VMEXIT_MASK);
1965 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1966 if (RT_SUCCESS(rcStrict))
1967 {
1968 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1969 * doesn't intercept HLT but intercepts VINTR? */
1970 *pfResched = true;
1971 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1972 if (rcStrict == VINF_SVM_VMEXIT)
1973 return VINF_SUCCESS;
1974 return VBOXSTRICTRC_VAL(rcStrict);
1975 }
1976
1977 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1978 return VINF_EM_TRIPLE_FAULT;
1979 }
1980
1981 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1982 uint8_t const uNstGstVector = CPUMGetSvmNstGstInterrupt(&pVCpu->cpum.GstCtx);
1983 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR vector %#x\n", uNstGstVector));
1984 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1985 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1986
1987 *pfResched = true;
1988 *pfInject = true;
1989 return VINF_EM_RESCHEDULE;
1990 }
1991 }
1992 return VINF_SUCCESS;
1993 }
1994
1995 if (CPUMIsGuestInVmxNestedHwVirtMode(&pVCpu->cpum.GstCtx))
1996 { /** @todo Nested VMX. */ }
1997
1998 /* Shouldn't really get here. */
1999 AssertMsgFailed(("Unrecognized nested hwvirt. arch!\n"));
2000 return VERR_EM_INTERNAL_ERROR;
2001}
2002#endif
2003
2004/**
2005 * Executes all pending forced actions.
2006 *
2007 * Forced actions can cause execution delays and execution
2008 * rescheduling. The first we deal with using action priority, so
2009 * that for instance pending timers aren't scheduled and run until
2010 * right before execution. The rescheduling we deal with using
2011 * return codes. The same goes for VM termination, only in that case
2012 * we exit everything.
2013 *
2014 * @returns VBox status code of equal or greater importance/severity than rc.
2015 * The most important ones are: VINF_EM_RESCHEDULE,
2016 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
2017 *
2018 * @param pVM The cross context VM structure.
2019 * @param pVCpu The cross context virtual CPU structure.
2020 * @param rc The current rc.
2021 *
2022 */
2023int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
2024{
2025 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
2026#ifdef VBOX_STRICT
2027 int rcIrq = VINF_SUCCESS;
2028#endif
2029 int rc2;
2030#define UPDATE_RC() \
2031 do { \
2032 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
2033 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
2034 break; \
2035 if (!rc || rc2 < rc) \
2036 rc = rc2; \
2037 } while (0)
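    /*
     * Informative example of the UPDATE_RC() merge rule: within the VINF_EM
     * range, lower values carry higher priority, so with rc == VINF_EM_RESCHEDULE
     * and rc2 == VINF_EM_SUSPEND the macro picks VINF_EM_SUSPEND, while a later
     * rc2 == VINF_SUCCESS leaves rc untouched.
     */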
2038 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
2039
2040 /*
2041 * Post execution chunk first.
2042 */
2043 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
2044 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
2045 {
2046 /*
2047 * EMT Rendezvous (must be serviced before termination).
2048 */
2049 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2050 {
2051 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2052 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2053 UPDATE_RC();
2054 /** @todo HACK ALERT! The following test is to make sure EM+TM
2055 * thinks the VM is stopped/reset before the next VM state change
2056 * is made. We need a better solution for this, or at least make it
2057 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2058 * VINF_EM_SUSPEND). */
2059 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2060 {
2061 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2062 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2063 return rc;
2064 }
2065 }
2066
2067 /*
2068 * State change request (cleared by vmR3SetStateLocked).
2069 */
2070 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2071 {
2072 VMSTATE enmState = VMR3GetState(pVM);
2073 switch (enmState)
2074 {
2075 case VMSTATE_FATAL_ERROR:
2076 case VMSTATE_FATAL_ERROR_LS:
2077 case VMSTATE_GURU_MEDITATION:
2078 case VMSTATE_GURU_MEDITATION_LS:
2079 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2080 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2081 return VINF_EM_SUSPEND;
2082
2083 case VMSTATE_DESTROYING:
2084 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2085 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2086 return VINF_EM_TERMINATE;
2087
2088 default:
2089 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2090 }
2091 }
2092
2093 /*
2094 * Debugger Facility polling.
2095 */
2096 if ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2097 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2098 {
2099 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2100 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2101 UPDATE_RC();
2102 }
2103
2104 /*
2105 * Postponed reset request.
2106 */
2107 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
2108 {
2109 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2110 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
2111 UPDATE_RC();
2112 }
2113
2114#ifdef VBOX_WITH_RAW_MODE
2115 /*
2116 * CSAM page scanning.
2117 */
2118 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2119 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
2120 {
2121 /** @todo check for 16 or 32 bits code! (D bit in the code selector) */
2122 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
2123 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2124 CSAMR3CheckCodeEx(pVM, &pVCpu->cpum.GstCtx, pVCpu->cpum.GstCtx.eip);
2125 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
2126 }
2127#endif
2128
2129 /*
2130 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
2131 */
2132 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2133 {
2134 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2135 UPDATE_RC();
2136 if (rc == VINF_EM_NO_MEMORY)
2137 return rc;
2138 }
2139
2140 /* check that we got them all */
2141 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2142 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
2143 }
2144
2145 /*
2146 * Normal priority then.
2147 * (Executed in no particular order.)
2148 */
2149 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
2150 {
2151 /*
2152 * PDM Queues are pending.
2153 */
2154 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
2155 PDMR3QueueFlushAll(pVM);
2156
2157 /*
2158 * PDM DMA transfers are pending.
2159 */
2160 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
2161 PDMR3DmaRun(pVM);
2162
2163 /*
2164 * EMT Rendezvous (make sure they are handled before the requests).
2165 */
2166 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2167 {
2168 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2169 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2170 UPDATE_RC();
2171 /** @todo HACK ALERT! The following test is to make sure EM+TM
2172 * thinks the VM is stopped/reset before the next VM state change
2173 * is made. We need a better solution for this, or at least make it
2174 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2175 * VINF_EM_SUSPEND). */
2176 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2177 {
2178 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2179 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2180 return rc;
2181 }
2182 }
2183
2184 /*
2185 * Requests from other threads.
2186 */
2187 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
2188 {
2189 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2190 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
2191 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
2192 {
2193 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2194 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2195 return rc2;
2196 }
2197 UPDATE_RC();
2198 /** @todo HACK ALERT! The following test is to make sure EM+TM
2199 * thinks the VM is stopped/reset before the next VM state change
2200 * is made. We need a better solution for this, or at least make it
2201 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2202 * VINF_EM_SUSPEND). */
2203 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2204 {
2205 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2206 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2207 return rc;
2208 }
2209 }
2210
2211#ifdef VBOX_WITH_REM
2212 /* Replay the handler notification changes. */
2213 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
2214 {
2215 /* Try not to cause deadlocks. */
2216 if ( pVM->cCpus == 1
2217 || ( !PGMIsLockOwner(pVM)
2218 && !IOMIsLockWriteOwner(pVM))
2219 )
2220 {
2221 EMRemLock(pVM);
2222 REMR3ReplayHandlerNotifications(pVM);
2223 EMRemUnlock(pVM);
2224 }
2225 }
2226#endif
2227
2228 /* check that we got them all */
2229 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
2230 }
2231
2232 /*
2233 * Normal priority then. (per-VCPU)
2234 * (Executed in no particular order.)
2235 */
2236 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2237 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
2238 {
2239 /*
2240 * Requests from other threads.
2241 */
2242 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
2243 {
2244 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2245 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
2246 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
2247 {
2248 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2249 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2250 return rc2;
2251 }
2252 UPDATE_RC();
2253 /** @todo HACK ALERT! The following test is to make sure EM+TM
2254 * thinks the VM is stopped/reset before the next VM state change
2255 * is made. We need a better solution for this, or at least make it
2256 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2257 * VINF_EM_SUSPEND). */
2258 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2259 {
2260 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2261 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2262 return rc;
2263 }
2264 }
2265
2266 /* check that we got them all */
2267 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
2268 }
2269
2270 /*
2271 * High priority pre execution chunk last.
2272 * (Executed in ascending priority order.)
2273 */
2274 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
2275 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
2276 {
2277 /*
2278 * Timers before interrupts.
2279 */
2280 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
2281 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2282 TMR3TimerQueuesDo(pVM);
2283
2284 /*
2285 * Pick up asynchronously posted interrupts into the APIC.
2286 */
2287 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2288 APICUpdatePendingInterrupts(pVCpu);
2289
2290 /*
2291 * The instruction following an emulated STI should *always* be executed!
2292 *
2293 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
2294 * the eip is the same as the inhibited instr address. Before we
2295 * are able to execute this instruction in raw mode (iret to
2296 * guest code) an external interrupt might force a world switch
2297 * again. Possibly allowing a guest interrupt to be dispatched
2298 * in the process. This could break the guest. Sounds very
2299 * unlikely, but such timing-sensitive problems are not as rare as
2300 * you might think.
2301 */
2302 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2303 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2304 {
2305 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
2306 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
2307 {
2308 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
2309 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2310 }
2311 else
2312 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
2313 }
2314
2315 /*
2316 * Interrupts.
2317 */
2318 bool fWakeupPending = false;
2319 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2320 && (!rc || rc >= VINF_EM_RESCHEDULE_HM))
2321 {
2322 if ( !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2323 && !TRPMHasTrap(pVCpu)) /* an interrupt could already be scheduled for dispatching in the recompiler. */
2324 {
2325 Assert(!HMR3IsEventPending(pVCpu));
2326#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2327 if (CPUMIsGuestInNestedHwVirtMode(&pVCpu->cpum.GstCtx))
2328 {
2329 bool fResched, fInject;
2330 rc2 = emR3NstGstInjectIntr(pVCpu, &fResched, &fInject);
2331 if (fInject)
2332 {
2333 fWakeupPending = true;
2334# ifdef VBOX_STRICT
2335 rcIrq = rc2;
2336# endif
2337 }
2338 if (fResched)
2339 UPDATE_RC();
2340 }
2341 else
2342#endif
2343 {
2344 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
2345 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2346#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2347 && pVCpu->cpum.GstCtx.hwvirt.fGif
2348#endif
2349#ifdef VBOX_WITH_RAW_MODE
2350 && !PATMIsPatchGCAddr(pVM, pVCpu->cpum.GstCtx.eip)
2351#endif
2352 && pVCpu->cpum.GstCtx.eflags.Bits.u1IF)
2353 {
2354 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2355 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
2356 /** @todo this really isn't nice, should properly handle this */
2357 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2358 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
2359 Log(("EM: TRPMR3InjectEvent -> %d\n", rc2));
2360 if (pVM->em.s.fIemExecutesAll && ( rc2 == VINF_EM_RESCHEDULE_REM
2361 || rc2 == VINF_EM_RESCHEDULE_HM
2362 || rc2 == VINF_EM_RESCHEDULE_RAW))
2363 {
2364 rc2 = VINF_EM_RESCHEDULE;
2365 }
2366#ifdef VBOX_STRICT
2367 rcIrq = rc2;
2368#endif
2369 UPDATE_RC();
2370 /* Reschedule required: We must not miss the wakeup below! */
2371 fWakeupPending = true;
2372 }
2373 }
2374 }
2375 }
2376
2377 /*
2378 * Allocate handy pages.
2379 */
2380 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2381 {
2382 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2383 UPDATE_RC();
2384 }
2385
2386 /*
2387 * Debugger Facility request.
2388 */
2389 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2390 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2391 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
2392 {
2393 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2394 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2395 UPDATE_RC();
2396 }
2397
2398 /*
2399 * EMT Rendezvous (must be serviced before termination).
2400 */
2401 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2402 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2403 {
2404 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2405 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2406 UPDATE_RC();
2407 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2408 * stopped/reset before the next VM state change is made. We need a better
2409 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2410 * && rc >= VINF_EM_SUSPEND). */
2411 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2412 {
2413 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2414 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2415 return rc;
2416 }
2417 }
2418
2419 /*
2420 * State change request (cleared by vmR3SetStateLocked).
2421 */
2422 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2423 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2424 {
2425 VMSTATE enmState = VMR3GetState(pVM);
2426 switch (enmState)
2427 {
2428 case VMSTATE_FATAL_ERROR:
2429 case VMSTATE_FATAL_ERROR_LS:
2430 case VMSTATE_GURU_MEDITATION:
2431 case VMSTATE_GURU_MEDITATION_LS:
2432 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2433 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2434 return VINF_EM_SUSPEND;
2435
2436 case VMSTATE_DESTROYING:
2437 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2438 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2439 return VINF_EM_TERMINATE;
2440
2441 default:
2442 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2443 }
2444 }
2445
2446 /*
2447 * Out of memory? Since most of our fellow high priority actions may cause us
2448 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2449 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2450 * than us since we can terminate without allocating more memory.
2451 */
2452 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2453 {
2454 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2455 UPDATE_RC();
2456 if (rc == VINF_EM_NO_MEMORY)
2457 return rc;
2458 }
2459
2460 /*
2461 * If the virtual sync clock is still stopped, make TM restart it.
2462 */
2463 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2464 TMR3VirtualSyncFF(pVM, pVCpu);
2465
2466#ifdef DEBUG
2467 /*
2468 * Debug, pause the VM.
2469 */
2470 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2471 {
2472 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2473 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2474 return VINF_EM_SUSPEND;
2475 }
2476#endif
2477
2478 /* check that we got them all */
2479 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2480 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2481 }
2482
2483#undef UPDATE_RC
2484 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2485 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2486 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2487 return rc;
2488}
2489
2490
2491/**
2492 * Check if the preset execution time cap restricts guest execution scheduling.
2493 *
2494 * @returns true if allowed, false otherwise
2495 * @param pVM The cross context VM structure.
2496 * @param pVCpu The cross context virtual CPU structure.
2497 */
2498bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2499{
2500 uint64_t u64UserTime, u64KernelTime;
2501
2502 if ( pVM->uCpuExecutionCap != 100
2503 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2504 {
2505 uint64_t u64TimeNow = RTTimeMilliTS();
2506 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2507 {
2508 /* New time slice. */
2509 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2510 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2511 pVCpu->em.s.u64TimeSliceExec = 0;
2512 }
2513 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2514
2515 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2516 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2517 return false;
2518 }
2519 return true;
2520}
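
/*
 * Worked example (illustrative figures): with a 100ms EM_TIME_SLICE and
 * uCpuExecutionCap set to 20, the EMT may consume at most
 * 100 * 20 / 100 = 20ms of combined kernel+user time per slice; once
 * u64TimeSliceExec reaches that budget this function returns false and the
 * caller (e.g. emR3RemExecute) sleeps briefly instead of running guest code.
 */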
2521
2522
2523/**
2524 * Execute VM.
2525 *
2526 * This function is the main loop of the VM. The emulation thread
2527 * calls this function when the VM has been successfully constructed
2528 * and we're ready for executing the VM.
2529 *
2530 * Returning from this function means that the VM is turned off or
2531 * suspended (state already saved) and deconstruction is next in line.
2532 *
2533 * All interaction from other threads is done using forced actions
2534 * and signaling of the wait object.
2535 *
2536 * @returns VBox status code; informational status codes may indicate failure.
2537 * @param pVM The cross context VM structure.
2538 * @param pVCpu The cross context virtual CPU structure.
2539 */
2540VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2541{
2542 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2543 pVM,
2544 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2545 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2546 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2547 pVCpu->em.s.fForceRAW));
2548 VM_ASSERT_EMT(pVM);
2549 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2550 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2551 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2552 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2553
2554 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2555 if (rc == 0)
2556 {
2557 /*
2558 * Start the virtual time.
2559 */
2560 TMR3NotifyResume(pVM, pVCpu);
2561
2562 /*
2563 * The Outer Main Loop.
2564 */
2565 bool fFFDone = false;
2566
2567 /* Reschedule right away to start in the right state. */
2568 rc = VINF_SUCCESS;
2569
2570 /* If resuming after a pause or a state load, restore the previous
2571 state or else we'll start executing code. Else, just reschedule. */
2572 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2573 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2574 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2575 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2576 else
2577 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu);
2578 pVCpu->em.s.cIemThenRemInstructions = 0;
2579 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2580
2581 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2582 for (;;)
2583 {
2584 /*
2585 * Before we can schedule anything (we're here because
2586 * scheduling is required) we must service any pending
2587 * forced actions to avoid any pending action causing
2588 * immediate rescheduling upon entering an inner loop.
2589 *
2590 * Do forced actions.
2591 */
2592 if ( !fFFDone
2593 && RT_SUCCESS(rc)
2594 && rc != VINF_EM_TERMINATE
2595 && rc != VINF_EM_OFF
2596 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2597 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2598 {
2599 rc = emR3ForcedActions(pVM, pVCpu, rc);
2600 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2601 if ( ( rc == VINF_EM_RESCHEDULE_REM
2602 || rc == VINF_EM_RESCHEDULE_HM)
2603 && pVCpu->em.s.fForceRAW)
2604 rc = VINF_EM_RESCHEDULE_RAW;
2605 }
2606 else if (fFFDone)
2607 fFFDone = false;
2608
2609 /*
2610 * Now what to do?
2611 */
2612 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2613 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2614 switch (rc)
2615 {
2616 /*
2617 * Keep doing what we're currently doing.
2618 */
2619 case VINF_SUCCESS:
2620 break;
2621
2622 /*
2623 * Reschedule - to raw-mode execution.
2624 */
2625/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2626 case VINF_EM_RESCHEDULE_RAW:
2627 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2628 if (VM_IS_RAW_MODE_ENABLED(pVM))
2629 {
2630 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2631 pVCpu->em.s.enmState = EMSTATE_RAW;
2632 }
2633 else
2634 {
2635 AssertLogRelFailed();
2636 pVCpu->em.s.enmState = EMSTATE_NONE;
2637 }
2638 break;
2639
2640 /*
2641 * Reschedule - to HM or NEM.
2642 */
2643 case VINF_EM_RESCHEDULE_HM:
2644 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2645 Assert(!pVCpu->em.s.fForceRAW);
2646 if (VM_IS_HM_ENABLED(pVM))
2647 {
2648 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2649 pVCpu->em.s.enmState = EMSTATE_HM;
2650 }
2651 else if (VM_IS_NEM_ENABLED(pVM))
2652 {
2653 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2654 pVCpu->em.s.enmState = EMSTATE_NEM;
2655 }
2656 else
2657 {
2658 AssertLogRelFailed();
2659 pVCpu->em.s.enmState = EMSTATE_NONE;
2660 }
2661 break;
2662
2663 /*
2664 * Reschedule - to recompiled execution.
2665 */
2666 case VINF_EM_RESCHEDULE_REM:
2667 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2668 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2669 {
2670 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2671 enmOldState, EMSTATE_IEM_THEN_REM));
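                        /* Restart the instruction counter that emR3ExecuteIemThenRem
                           uses to decide when to give up on IEM and fall back to REM
                           proper. */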
2672 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2673 {
2674 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2675 pVCpu->em.s.cIemThenRemInstructions = 0;
2676 }
2677 }
2678 else
2679 {
2680 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2681 pVCpu->em.s.enmState = EMSTATE_REM;
2682 }
2683 break;
2684
2685 /*
2686 * Resume.
2687 */
2688 case VINF_EM_RESUME:
2689 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2690 /* Don't reschedule in the halted or wait for SIPI case. */
2691 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2692 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2693 {
2694 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2695 break;
2696 }
2697 /* fall through and get scheduled. */
2698 RT_FALL_THRU();
2699
2700 /*
2701 * Reschedule.
2702 */
2703 case VINF_EM_RESCHEDULE:
2704 {
2705 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2706 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2707 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2708 pVCpu->em.s.cIemThenRemInstructions = 0;
2709 pVCpu->em.s.enmState = enmState;
2710 break;
2711 }
2712
2713 /*
2714 * Halted.
2715 */
2716 case VINF_EM_HALT:
2717 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2718 pVCpu->em.s.enmState = EMSTATE_HALTED;
2719 break;
2720
2721 /*
 2722 * Switch to the wait-for-SIPI state (application processors only).
2723 */
2724 case VINF_EM_WAIT_SIPI:
2725 Assert(pVCpu->idCpu != 0);
2726 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2727 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2728 break;
2729
2730
2731 /*
2732 * Suspend.
2733 */
2734 case VINF_EM_SUSPEND:
2735 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2736 Assert(enmOldState != EMSTATE_SUSPENDED);
2737 pVCpu->em.s.enmPrevState = enmOldState;
2738 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2739 break;
2740
2741 /*
2742 * Reset.
 2743 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2744 */
2745 case VINF_EM_RESET:
2746 {
2747 if (pVCpu->idCpu == 0)
2748 {
2749 EMSTATE enmState = emR3Reschedule(pVM, pVCpu);
2750 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2751 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2752 pVCpu->em.s.cIemThenRemInstructions = 0;
2753 pVCpu->em.s.enmState = enmState;
2754 }
2755 else
2756 {
2757 /* All other VCPUs go into the wait for SIPI state. */
2758 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2759 }
2760 break;
2761 }
2762
2763 /*
2764 * Power Off.
2765 */
2766 case VINF_EM_OFF:
2767 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2768 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2769 TMR3NotifySuspend(pVM, pVCpu);
2770 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2771 return rc;
2772
2773 /*
2774 * Terminate the VM.
2775 */
2776 case VINF_EM_TERMINATE:
2777 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2778 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2779 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2780 TMR3NotifySuspend(pVM, pVCpu);
2781 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2782 return rc;
2783
2784
2785 /*
2786 * Out of memory, suspend the VM and stuff.
2787 */
2788 case VINF_EM_NO_MEMORY:
2789 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2790 Assert(enmOldState != EMSTATE_SUSPENDED);
2791 pVCpu->em.s.enmPrevState = enmOldState;
2792 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2793 TMR3NotifySuspend(pVM, pVCpu);
2794 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2795
2796 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2797 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2798 if (rc != VINF_EM_SUSPEND)
2799 {
2800 if (RT_SUCCESS_NP(rc))
2801 {
2802 AssertLogRelMsgFailed(("%Rrc\n", rc));
2803 rc = VERR_EM_INTERNAL_ERROR;
2804 }
2805 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2806 }
2807 return rc;
2808
2809 /*
2810 * Guest debug events.
2811 */
2812 case VINF_EM_DBG_STEPPED:
2813 case VINF_EM_DBG_STOP:
2814 case VINF_EM_DBG_EVENT:
2815 case VINF_EM_DBG_BREAKPOINT:
2816 case VINF_EM_DBG_STEP:
2817 if (enmOldState == EMSTATE_RAW)
2818 {
2819 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2820 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2821 }
2822 else if (enmOldState == EMSTATE_HM)
2823 {
2824 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2825 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2826 }
2827 else if (enmOldState == EMSTATE_NEM)
2828 {
2829 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2830 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2831 }
2832 else if (enmOldState == EMSTATE_REM)
2833 {
2834 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2835 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2836 }
2837 else
2838 {
2839 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2840 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2841 }
2842 break;
2843
2844 /*
2845 * Hypervisor debug events.
2846 */
2847 case VINF_EM_DBG_HYPER_STEPPED:
2848 case VINF_EM_DBG_HYPER_BREAKPOINT:
2849 case VINF_EM_DBG_HYPER_ASSERTION:
2850 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2851 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2852 break;
2853
2854 /*
2855 * Triple fault.
2856 */
2857 case VINF_EM_TRIPLE_FAULT:
2858 if (!pVM->em.s.fGuruOnTripleFault)
2859 {
2860 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2861 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2862 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2863 continue;
2864 }
2865 /* Else fall through and trigger a guru. */
2866 RT_FALL_THRU();
2867
2868 case VERR_VMM_RING0_ASSERTION:
2869 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2870 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2871 break;
2872
2873 /*
2874 * Any error code showing up here other than the ones we
 2875 * know and process above is considered to be FATAL.
2876 *
2877 * Unknown warnings and informational status codes are also
2878 * included in this.
2879 */
2880 default:
2881 if (RT_SUCCESS_NP(rc))
2882 {
2883 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2884 rc = VERR_EM_INTERNAL_ERROR;
2885 }
2886 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2887 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2888 break;
2889 }
2890
2891 /*
2892 * Act on state transition.
2893 */
2894 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2895 if (enmOldState != enmNewState)
2896 {
2897 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2898
2899 /* Clear MWait flags and the unhalt FF. */
2900 if ( enmOldState == EMSTATE_HALTED
2901 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2902 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2903 && ( enmNewState == EMSTATE_RAW
2904 || enmNewState == EMSTATE_HM
2905 || enmNewState == EMSTATE_NEM
2906 || enmNewState == EMSTATE_REM
2907 || enmNewState == EMSTATE_IEM_THEN_REM
2908 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2909 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2910 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2911 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2912 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2913 {
2914 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2915 {
2916 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2917 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2918 }
2919 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2920 {
2921 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2922 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2923 }
2924 }
2925 }
2926 else
2927 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2928
2929 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2930 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2931
2932 /*
2933 * Act on the new state.
2934 */
2935 switch (enmNewState)
2936 {
2937 /*
2938 * Execute raw.
2939 */
2940 case EMSTATE_RAW:
2941#ifdef VBOX_WITH_RAW_MODE
2942 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2943#else
2944 AssertLogRelMsgFailed(("%Rrc\n", rc));
2945 rc = VERR_EM_INTERNAL_ERROR;
2946#endif
2947 break;
2948
2949 /*
2950 * Execute hardware accelerated raw.
2951 */
2952 case EMSTATE_HM:
2953 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2954 break;
2955
2956 /*
 2957 * Execute hardware accelerated using the native execution manager (NEM).
2958 */
2959 case EMSTATE_NEM:
2960 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2961 break;
2962
2963 /*
2964 * Execute recompiled.
2965 */
2966 case EMSTATE_REM:
2967 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2968 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2969 break;
2970
2971 /*
2972 * Execute in the interpreter.
2973 */
2974 case EMSTATE_IEM:
2975 {
2976#if 0 /* For testing purposes. */
2977 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2978 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2979 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2980 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2981 rc = VINF_SUCCESS;
2982 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2983#endif
2984 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
2985 if (pVM->em.s.fIemExecutesAll)
2986 {
2987 Assert(rc != VINF_EM_RESCHEDULE_REM);
2988 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2989 Assert(rc != VINF_EM_RESCHEDULE_HM);
2990 }
2991 fFFDone = false;
2992 break;
2993 }
2994
2995 /*
 2996 * Execute in IEM, hoping we can quickly switch back to HM
2997 * or RAW execution. If our hopes fail, we go to REM.
2998 */
2999 case EMSTATE_IEM_THEN_REM:
3000 {
3001 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
3002 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
3003 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
3004 break;
3005 }
3006
3007 /*
3008 * Application processor execution halted until SIPI.
3009 */
3010 case EMSTATE_WAIT_SIPI:
3011 /* no break */
3012 /*
3013 * hlt - execution halted until interrupt.
3014 */
3015 case EMSTATE_HALTED:
3016 {
3017 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
 3018 /* If HM (or someone else) stores a pending interrupt in
 3019 TRPM, it must be dispatched ASAP without any halting.
 3020 Anything pending in TRPM has been accepted and the CPU
 3021 should already be in the right state to receive it. */
3022 if (TRPMHasTrap(pVCpu))
3023 rc = VINF_EM_RESCHEDULE;
3024 /* MWAIT has a special extension where it's woken up when
3025 an interrupt is pending even when IF=0. */
3026 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
3027 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
3028 {
3029 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
3030 if (rc == VINF_SUCCESS)
3031 {
3032 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
3033 APICUpdatePendingInterrupts(pVCpu);
3034
3035 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
3036 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
3037 {
3038 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
3039 rc = VINF_EM_RESCHEDULE;
3040 }
3041 }
3042 }
3043 else
3044 {
3045 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
 3046 /* We're only interested in NMIs/SMIs, which have their own FFs, so we don't need to
 3047 check VMCPU_FF_UPDATE_APIC here. */
3048 if ( rc == VINF_SUCCESS
3049 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
3050 {
3051 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
3052 rc = VINF_EM_RESCHEDULE;
3053 }
3054 }
3055
3056 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
3057 break;
3058 }
3059
3060 /*
3061 * Suspended - return to VM.cpp.
3062 */
3063 case EMSTATE_SUSPENDED:
3064 TMR3NotifySuspend(pVM, pVCpu);
3065 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3066 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3067 return VINF_EM_SUSPEND;
3068
3069 /*
3070 * Debugging in the guest.
3071 */
3072 case EMSTATE_DEBUG_GUEST_RAW:
3073 case EMSTATE_DEBUG_GUEST_HM:
3074 case EMSTATE_DEBUG_GUEST_NEM:
3075 case EMSTATE_DEBUG_GUEST_IEM:
3076 case EMSTATE_DEBUG_GUEST_REM:
3077 TMR3NotifySuspend(pVM, pVCpu);
3078 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
3079 TMR3NotifyResume(pVM, pVCpu);
3080 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
3081 break;
3082
3083 /*
3084 * Debugging in the hypervisor.
3085 */
3086 case EMSTATE_DEBUG_HYPER:
3087 {
3088 TMR3NotifySuspend(pVM, pVCpu);
3089 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3090
3091 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
3092 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
3093 if (rc != VINF_SUCCESS)
3094 {
3095 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
3096 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
3097 else
3098 {
3099 /* switch to guru meditation mode */
3100 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3101 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3102 VMMR3FatalDump(pVM, pVCpu, rc);
3103 }
3104 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3105 return rc;
3106 }
3107
3108 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
3109 TMR3NotifyResume(pVM, pVCpu);
3110 break;
3111 }
3112
3113 /*
3114 * Guru meditation takes place in the debugger.
3115 */
3116 case EMSTATE_GURU_MEDITATION:
3117 {
3118 TMR3NotifySuspend(pVM, pVCpu);
3119 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3120 VMMR3FatalDump(pVM, pVCpu, rc);
3121 emR3Debug(pVM, pVCpu, rc);
3122 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3123 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3124 return rc;
3125 }
3126
3127 /*
3128 * The states we don't expect here.
3129 */
3130 case EMSTATE_NONE:
3131 case EMSTATE_TERMINATING:
3132 default:
3133 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
3134 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3135 TMR3NotifySuspend(pVM, pVCpu);
3136 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3137 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3138 return VERR_EM_INTERNAL_ERROR;
3139 }
3140 } /* The Outer Main Loop */
3141 }
3142 else
3143 {
3144 /*
3145 * Fatal error.
3146 */
3147 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
3148 TMR3NotifySuspend(pVM, pVCpu);
3149 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3150 VMMR3FatalDump(pVM, pVCpu, rc);
3151 emR3Debug(pVM, pVCpu, rc);
3152 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3153 /** @todo change the VM state! */
3154 return rc;
3155 }
3156
3157 /* not reached */
3158}
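/*
 * A minimal sketch of how an emulation thread might drive EMR3ExecuteVM().
 * The name vmR3EmtLoopSketch and the wait step are illustrative assumptions
 * only; the actual EMT loop lives elsewhere (VM.cpp / VMEmt.cpp).
 *
 * @code
 *  static int vmR3EmtLoopSketch(PVM pVM, PVMCPU pVCpu)     // hypothetical
 *  {
 *      for (;;)
 *      {
 *          // Runs the outer main loop above until it exits with
 *          // VINF_EM_SUSPEND, VINF_EM_OFF, VINF_EM_TERMINATE or a
 *          // fatal status code (guru meditation).
 *          int rc = EMR3ExecuteVM(pVM, pVCpu);
 *          if (rc != VINF_EM_SUSPEND)
 *              return rc;                  // power off / terminate / error
 *          // Suspended: block until resumed (details omitted), then
 *          // re-enter EMR3ExecuteVM, which restores the previous state.
 *      }
 *  }
 * @endcode
 */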
3159
3160/**
 3161 * Notify EM that the VM is being suspended (used by FTM).
 3162 * @returns VBox status code.
 3163 * @param pVM The cross context VM structure.
3164 */
3165VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
3166{
3167 PVMCPU pVCpu = VMMGetCpu(pVM);
3168
3169 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
3170 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
3171 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
3172 return VINF_SUCCESS;
3173}
3174
3175/**
 3176 * Notify EM that the VM is being resumed (used by FTM).
 3177 * @returns VBox status code.
 3178 * @param pVM The cross context VM structure.
3179 */
3180VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
3181{
3182 PVMCPU pVCpu = VMMGetCpu(pVM);
3183 EMSTATE enmCurState = pVCpu->em.s.enmState;
3184
3185 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
3186 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
3187 pVCpu->em.s.enmPrevState = enmCurState;
3188 return VINF_SUCCESS;
3189}
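
/*
 * A minimal usage sketch for the two notification APIs above, assuming a
 * hypothetical caller (ftmR3SyncSketch is an illustrative name, not an
 * actual FTM function): virtual time is stopped around the critical work
 * and the EM state is parked and restored.
 *
 * @code
 *  static int ftmR3SyncSketch(PVM pVM)
 *  {
 *      int rc = EMR3NotifySuspend(pVM);    // park EM state, stop virtual time
 *      if (RT_SUCCESS(rc))
 *      {
 *          // ... do the work that requires the VM to be quiesced ...
 *          rc = EMR3NotifyResume(pVM);     // restore EM state, resume time
 *      }
 *      return rc;
 *  }
 * @endcode
 */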