
source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp @ 72489

Last change on this file since 72489 was 72489, checked in by vboxsync, 6 years ago

NEM,CPUM,EM: Don't sync in/out the entire state when leaving the inner NEM loop, only what IEM/TRPM might need. Speeds up MMIO and I/O requiring return to ring-3. bugref:9044 [build fix]

/* $Id: EM.cpp 72489 2018-06-09 12:28:23Z vboxsync $ */
/** @file
 * EM - Execution Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_em EM - The Execution Monitor / Manager
 *
 * The Execution Monitor/Manager is responsible for running the VM, scheduling
 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
 * Interpreted), and keeping the CPU states in sync. The function
 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
 * emR3RemExecute).
 *
 * The interpreted execution is only used to avoid switching between
 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
 * The interpretation is thus implemented as part of EM.
 *
 * @see grp_em
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
#include <VBox/vmm/em.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include <VBox/vmm/apic.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmqueue.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/patm.h>
#include "EMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include "VMMTracing.h"

#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/stream.h>
#include <iprt/thread.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
#define EM_NOTIFY_HM
#endif


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
static const char *emR3GetStateName(EMSTATE enmState);
#endif
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
#if defined(VBOX_WITH_REM) || defined(DEBUG)
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
#endif
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);


/**
 * Initializes the EM.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
{
    LogFlow(("EMR3Init\n"));
    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, em.s, 32);
    AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
    AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));

    /*
     * Init the structure.
     */
    pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
    PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
    PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");

    bool fEnabled;
    int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileUser = !fEnabled;

    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileSupervisor = !fEnabled;

#ifdef VBOX_WITH_RAW_RING1
    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
    AssertLogRelRCReturn(rc, rc);
#else
    pVM->fRawRing1Enabled = false; /* Disabled by default. */
#endif

    rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
    AssertLogRelRCReturn(rc, rc);

    rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
    AssertLogRelRCReturn(rc, rc);
    pVM->em.s.fGuruOnTripleFault = !fEnabled;
    if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
    {
        LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
        pVM->em.s.fGuruOnTripleFault = true;
    }

    LogRel(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
            pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));

#ifdef VBOX_WITH_REM
    /*
     * Initialize the REM critical section.
     */
    AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
    rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
    AssertRCReturn(rc, rc);
#endif

    /*
     * Saved state.
     */
    rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
                               NULL, NULL, NULL,
                               NULL, emR3Save, NULL,
                               NULL, emR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
        pVCpu->em.s.enmPrevState = EMSTATE_NONE;
        pVCpu->em.s.fForceRAW = false;

        pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
#ifdef VBOX_WITH_RAW_MODE
        if (VM_IS_RAW_MODE_ENABLED(pVM))
        {
            pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
            AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
        }
#endif

        /* Force reset of the time slice. */
        pVCpu->em.s.u64TimeSliceStart = 0;

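        /* Editor's note: the macros below are helpers for the statistics
           registrations that follow. Each one formats the VCPU index 'i'
           into the STAM name via STAMR3RegisterF and asserts on failure,
           so every statistic is registered once per virtual CPU. */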
# define EM_REG_COUNTER(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_COUNTER_USED(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE_ADV(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

        /*
         * Statistics.
         */
#ifdef VBOX_WITH_STATISTICS
        PEMSTATS pStats;
        rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->em.s.pStatsR3 = pStats;
        pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
        pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);

        EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
        EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");

        EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
        EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");

        EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
        EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of instruction prefixes.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of instruction prefixes.");

        EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed over to IEM in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
        EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
        EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");

        EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
        pVCpu->em.s.pCliStatTree = 0;

        /* These should be considered for release statistics. */
        EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
        EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%d/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%d/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%d/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
        EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%d/EM/NEMEnter", "Profiling NEM entry overhead.");
#endif /* VBOX_WITH_STATISTICS */
        EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%d/EM/NEMExec", "Profiling NEM execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%d/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
#ifdef VBOX_WITH_STATISTICS
        EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
#endif /* VBOX_WITH_STATISTICS */

        EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
        EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
        EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");

        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
    }

    emR3InitDbg(pVM);
    return VINF_SUCCESS;
}

/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
{
    LogFlow(("EMR3Relocate\n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        if (pVCpu->em.s.pStatsR3)
            pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
    }
}


/**
 * Reset the EM state for a CPU.
 *
 * Called by EMR3Reset and hot plugging.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
{
    /* Reset scheduling state. */
    pVCpu->em.s.fForceRAW = false;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);

    /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
       out of the HALTED state here so that enmPrevState doesn't end up as
       HALTED when EMR3Execute returns. */
    if (pVCpu->em.s.enmState == EMSTATE_HALTED)
    {
        Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
        pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
    }
}


/**
 * Reset notification.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
{
    Log(("EMR3Reset: \n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        EMR3ResetCpu(&pVM->aCpus[i]);
}


/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources;
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
{
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

#ifdef VBOX_WITH_REM
    PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
#else
    RT_NOREF(pVM);
#endif
    return VINF_SUCCESS;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);

        Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
        Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
        SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);

        /* Save mwait state. */
        SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
        SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
        SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
        SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
        SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
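        /* Editor's note: only the status of the final put is checked below;
           an SSM handle latches the first write error, so a failure in any
           of the preceding puts is reflected by the last call as well. */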
        int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    /*
     * Validate version.
     */
    if (   uVersion > EM_SAVED_STATE_VERSION
        || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
    {
        AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Load the saved state.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;
        AssertRCReturn(rc, rc);

        if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
        {
            AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
            rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
            AssertRCReturn(rc, rc);
            Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);

            pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
        }
        if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
        {
            /* Load mwait state. */
            rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
            AssertRCReturn(rc, rc);
        }

        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return VINF_SUCCESS;
}


/**
 * Argument packet for emR3SetExecutionPolicy.
 */
struct EMR3SETEXECPOLICYARGS
{
    EMEXECPOLICY enmPolicy;
    bool fEnforce;
};


/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
 */
static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /*
     * Only the first CPU changes the variables.
     */
    if (pVCpu->idCpu == 0)
    {
        struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
        switch (pArgs->enmPolicy)
        {
            case EMEXECPOLICY_RECOMPILE_RING0:
                pVM->fRecompileSupervisor = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_RECOMPILE_RING3:
                pVM->fRecompileUser = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_IEM_ALL:
                pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
                break;
            default:
                AssertFailedReturn(VERR_INVALID_PARAMETER);
        }
        LogRel(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
                pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
    }

    /*
     * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
     */
    return pVCpu->em.s.enmState == EMSTATE_RAW
        || pVCpu->em.s.enmState == EMSTATE_HM
        || pVCpu->em.s.enmState == EMSTATE_NEM
        || pVCpu->em.s.enmState == EMSTATE_IEM
        || pVCpu->em.s.enmState == EMSTATE_REM
        || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
         ? VINF_EM_RESCHEDULE
         : VINF_SUCCESS;
}


/**
 * Changes an execution scheduling policy parameter.
 *
 * This is used to enable or disable raw-mode / hardware-virtualization
 * execution of user and supervisor code.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to change.
 * @param   fEnforce    Whether to enforce the policy or not.
 */
VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);

    struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
    return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
}


/**
 * Queries an execution scheduling policy parameter.
 *
 * @returns VBox status code
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to query.
 * @param   pfEnforced  Where to return the current value.
 */
VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
{
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /* No need to bother EMTs with a query. */
    switch (enmPolicy)
    {
        case EMEXECPOLICY_RECOMPILE_RING0:
            *pfEnforced = pVM->fRecompileSupervisor;
            break;
        case EMEXECPOLICY_RECOMPILE_RING3:
            *pfEnforced = pVM->fRecompileUser;
            break;
        case EMEXECPOLICY_IEM_ALL:
            *pfEnforced = pVM->em.s.fIemExecutesAll;
            break;
        default:
            AssertFailedReturn(VERR_INTERNAL_ERROR_2);
    }

    return VINF_SUCCESS;
}
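
/* Editor's note: a minimal sketch of how a (hypothetical) caller might use
   the two APIs above to force all guest execution through IEM and verify
   the setting took effect:

       int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
       if (RT_SUCCESS(rc))
       {
           bool fEnforced = false;
           EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fEnforced);
           Assert(fEnforced);
       }

   The set path goes through a VMMR3EmtRendezvous so every EMT observes the
   change; the query reads the flags directly since no EMT is needed. */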


/**
 * Queries the main execution engine of the VM.
 *
 * @returns VBox status code
 * @param   pUVM                    The user mode VM handle.
 * @param   pbMainExecutionEngine   Where to return the result, VM_EXEC_ENGINE_XXX.
 */
VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
{
    AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
    *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;

    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    *pbMainExecutionEngine = pVM->bMainExecutionEngine;
    return VINF_SUCCESS;
}


/**
 * Raise a fatal error.
 *
 * Safely terminate the VM with full state report and stuff. This function
 * will naturally never return.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
}
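/* Editor's note: the FatalLongJump buffer is presumably armed by a setjmp
   in EMR3ExecuteVM (the 'main-loop' mentioned in the file header, not shown
   in this excerpt), which then reports the guru meditation state. */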


#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
/**
 * Gets the EM state name.
 *
 * @returns Pointer to a read-only state name.
 * @param   enmState    The state.
 */
static const char *emR3GetStateName(EMSTATE enmState)
{
    switch (enmState)
    {
        case EMSTATE_NONE:              return "EMSTATE_NONE";
        case EMSTATE_RAW:               return "EMSTATE_RAW";
        case EMSTATE_HM:                return "EMSTATE_HM";
        case EMSTATE_IEM:               return "EMSTATE_IEM";
        case EMSTATE_REM:               return "EMSTATE_REM";
        case EMSTATE_HALTED:            return "EMSTATE_HALTED";
        case EMSTATE_WAIT_SIPI:         return "EMSTATE_WAIT_SIPI";
        case EMSTATE_SUSPENDED:         return "EMSTATE_SUSPENDED";
        case EMSTATE_TERMINATING:       return "EMSTATE_TERMINATING";
        case EMSTATE_DEBUG_GUEST_RAW:   return "EMSTATE_DEBUG_GUEST_RAW";
        case EMSTATE_DEBUG_GUEST_HM:    return "EMSTATE_DEBUG_GUEST_HM";
        case EMSTATE_DEBUG_GUEST_IEM:   return "EMSTATE_DEBUG_GUEST_IEM";
        case EMSTATE_DEBUG_GUEST_REM:   return "EMSTATE_DEBUG_GUEST_REM";
        case EMSTATE_DEBUG_HYPER:       return "EMSTATE_DEBUG_HYPER";
        case EMSTATE_GURU_MEDITATION:   return "EMSTATE_GURU_MEDITATION";
        case EMSTATE_IEM_THEN_REM:      return "EMSTATE_IEM_THEN_REM";
        case EMSTATE_NEM:               return "EMSTATE_NEM";
        case EMSTATE_DEBUG_GUEST_NEM:   return "EMSTATE_DEBUG_GUEST_NEM";
        default:                        return "Unknown!";
    }
}
#endif /* LOG_ENABLED || VBOX_STRICT */


/**
 * Debug loop.
 *
 * @returns VBox status code for EM.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      Current EM VBox status code.
 */
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
        const VBOXSTRICTRC rcLast = rc;

        /*
         * Debug related RC.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (   pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    || pVCpu->em.s.fForceRAW /* paranoia */)
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawStep(pVM, pVCpu);
#else
                    AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
#endif
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
                    rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
                    rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
#ifdef VBOX_WITH_REM
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
                    rc = emR3RemStep(pVM, pVCpu);
#endif
                else
                {
                    rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
                    if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
                        rc = VINF_EM_DBG_STEPPED;
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_EVENT:
                rc = DBGFR3EventHandlePending(pVM, pVCpu);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                RTLogFlush(NULL);
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;
            case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special error codes! */
            {
                LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
            }
        }

        /*
         * Process the result.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Continue the debugging loop.
             */
            case VINF_EM_DBG_STEP:
            case VINF_EM_DBG_STOP:
            case VINF_EM_DBG_EVENT:
            case VINF_EM_DBG_STEPPED:
            case VINF_EM_DBG_BREAKPOINT:
            case VINF_EM_DBG_HYPER_STEPPED:
            case VINF_EM_DBG_HYPER_BREAKPOINT:
            case VINF_EM_DBG_HYPER_ASSERTION:
                break;

            /*
             * Resuming execution (in some form) has to be done here if we got
             * a hypervisor debug event.
             */
            case VINF_SUCCESS:
            case VINF_EM_RESUME:
            case VINF_EM_SUSPEND:
            case VINF_EM_RESCHEDULE:
            case VINF_EM_RESCHEDULE_RAW:
            case VINF_EM_RESCHEDULE_REM:
            case VINF_EM_HALT:
                if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                {
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawResumeHyper(pVM, pVCpu);
                    if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                        continue;
#else
                    AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
#endif
                }
                if (rc == VINF_SUCCESS)
                    rc = VINF_EM_RESCHEDULE;
                return rc;

            /*
             * The debugger isn't attached.
             * We'll simply turn the thing off since that's the easiest thing to do.
             */
            case VERR_DBGF_NOT_ATTACHED:
                switch (VBOXSTRICTRC_VAL(rcLast))
                {
                    case VINF_EM_DBG_HYPER_STEPPED:
                    case VINF_EM_DBG_HYPER_BREAKPOINT:
                    case VINF_EM_DBG_HYPER_ASSERTION:
                    case VERR_TRPM_PANIC:
                    case VERR_TRPM_DONT_PANIC:
                    case VERR_VMM_RING0_ASSERTION:
                    case VERR_VMM_HYPER_CR3_MISMATCH:
                    case VERR_VMM_RING3_CALL_DISABLED:
                        return rcLast;
                }
                return VINF_EM_OFF;

            /*
             * Status codes terminating the VM in one or another sense.
             */
            case VINF_EM_TERMINATE:
            case VINF_EM_OFF:
            case VINF_EM_RESET:
            case VINF_EM_NO_MEMORY:
            case VINF_EM_RAW_STALE_SELECTOR:
            case VINF_EM_RAW_IRET_TRAP:
            case VERR_TRPM_PANIC:
            case VERR_TRPM_DONT_PANIC:
            case VERR_IEM_INSTR_NOT_IMPLEMENTED:
            case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
            case VERR_VMM_RING0_ASSERTION:
            case VERR_VMM_HYPER_CR3_MISMATCH:
            case VERR_VMM_RING3_CALL_DISABLED:
            case VERR_INTERNAL_ERROR:
            case VERR_INTERNAL_ERROR_2:
            case VERR_INTERNAL_ERROR_3:
            case VERR_INTERNAL_ERROR_4:
            case VERR_INTERNAL_ERROR_5:
            case VERR_IPE_UNEXPECTED_STATUS:
            case VERR_IPE_UNEXPECTED_INFO_STATUS:
            case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                return rc;

            /*
             * The rest is unexpected, and will keep us here.
             */
            default:
                AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
                break;
        }
    } /* debug for ever */
}


#if defined(VBOX_WITH_REM) || defined(DEBUG)
/**
 * Steps recompiled code.
 *
 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
 *          VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
    Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

# ifdef VBOX_WITH_REM
    EMRemLock(pVM);

    /*
     * Switch to REM, step instruction, switch back.
     */
    int rc = REMR3State(pVM, pVCpu);
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);
        REMR3StateBack(pVM, pVCpu);
    }
    EMRemUnlock(pVM);

# else
    int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
# endif

    Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
    return rc;
}
#endif /* VBOX_WITH_REM || DEBUG */


#ifdef VBOX_WITH_REM
/**
 * emR3RemExecute helper that syncs the state back from REM and leaves the
 * REM critical section.
1063 *
1064 * @returns false - new fInREMState value.
1065 * @param pVM The cross context VM structure.
1066 * @param pVCpu The cross context virtual CPU structure.
1067 */
1068DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1069{
1070 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1071 REMR3StateBack(pVM, pVCpu);
1072 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1073
1074 EMRemUnlock(pVM);
1075 return false;
1076}
1077#endif
1078
1079
1080/**
1081 * Executes recompiled code.
1082 *
1083 * This function contains the recompiler version of the inner
1084 * execution loop (the outer loop being in EMR3ExecuteVM()).
1085 *
1086 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1087 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1088 *
1089 * @param pVM The cross context VM structure.
1090 * @param pVCpu The cross context virtual CPU structure.
1091 * @param pfFFDone Where to store an indicator telling whether or not
1092 * FFs were done before returning.
1093 *
1094 */
1095static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1096{
1097#ifdef LOG_ENABLED
1098 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1099 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1100
1101 if (pCtx->eflags.Bits.u1VM)
1102 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
1103 else
1104 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
1105#endif
1106 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1107
1108#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1109 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1110 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1111 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1112#endif
1113
1114 /*
1115     * Spin until we get a forced action that returns anything but VINF_SUCCESS,
1116     * or until REM suggests raw-mode execution.
1117 */
1118 *pfFFDone = false;
1119#ifdef VBOX_WITH_REM
1120 bool fInREMState = false;
1121#else
1122 uint32_t cLoops = 0;
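    /* Without REM we count the loop iterations so that every so often (every
       8th pass, see the check near the bottom of the loop) we can re-evaluate
       whether a faster execution mode has become possible. */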
1123#endif
1124 int rc = VINF_SUCCESS;
1125 for (;;)
1126 {
1127#ifdef VBOX_WITH_REM
1128 /*
1129 * Lock REM and update the state if not already in sync.
1130 *
1131 * Note! Big lock, but you are not supposed to own any lock when
1132 * coming in here.
1133 */
1134 if (!fInREMState)
1135 {
1136 EMRemLock(pVM);
1137 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1138
1139            /* Flush the recompiler translation blocks if the VCPU has changed;
1140               also force a full CPU state resync. */
1141 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1142 {
1143 REMFlushTBs(pVM);
1144 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1145 }
1146 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1147
1148 rc = REMR3State(pVM, pVCpu);
1149
1150 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1151 if (RT_FAILURE(rc))
1152 break;
1153 fInREMState = true;
1154
1155 /*
1156 * We might have missed the raising of VMREQ, TIMER and some other
1157 * important FFs while we were busy switching the state. So, check again.
1158 */
1159 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1160 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1161 {
1162 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1163 goto l_REMDoForcedActions;
1164 }
1165 }
1166#endif
1167
1168 /*
1169 * Execute REM.
1170 */
1171 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1172 {
1173 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1174#ifdef VBOX_WITH_REM
1175 rc = REMR3Run(pVM, pVCpu);
1176#else
1177 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
1178#endif
1179 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1180 }
1181 else
1182 {
1183 /* Give up this time slice; virtual time continues */
1184 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1185 RTThreadSleep(5);
1186 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1187 rc = VINF_SUCCESS;
1188 }
1189
1190 /*
1191 * Deal with high priority post execution FFs before doing anything
1192 * else. Sync back the state and leave the lock to be on the safe side.
1193 */
1194 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1195 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1196 {
1197#ifdef VBOX_WITH_REM
1198 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1199#endif
1200 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1201 }
1202
1203 /*
1204 * Process the returned status code.
1205 */
1206 if (rc != VINF_SUCCESS)
1207 {
1208 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1209 break;
1210 if (rc != VINF_REM_INTERRUPED_FF)
1211 {
1212#ifndef VBOX_WITH_REM
1213                /* Try to dodge unimplemented IEM trouble by rescheduling. */
1214 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1215 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1216 {
1217 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1218 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1219 {
1220 rc = VINF_EM_RESCHEDULE;
1221 break;
1222 }
1223 }
1224#endif
1225
1226 /*
1227 * Anything which is not known to us means an internal error
1228 * and the termination of the VM!
1229 */
1230 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1231 break;
1232 }
1233 }
1234
1235
1236 /*
1237 * Check and execute forced actions.
1238 *
1239         * Sync back the VM state and leave the lock before calling any of
1240         * these; you never know what's going to happen here.
1241 */
1242#ifdef VBOX_HIGH_RES_TIMERS_HACK
1243 TMTimerPollVoid(pVM, pVCpu);
1244#endif
1245 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1246 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1247 || VMCPU_FF_IS_PENDING(pVCpu,
1248 VMCPU_FF_ALL_REM_MASK
1249 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1250 {
1251#ifdef VBOX_WITH_REM
1252l_REMDoForcedActions:
1253 if (fInREMState)
1254 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1255#endif
1256 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1257 rc = emR3ForcedActions(pVM, pVCpu, rc);
1258 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1259 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1260 if ( rc != VINF_SUCCESS
1261 && rc != VINF_EM_RESCHEDULE_REM)
1262 {
1263 *pfFFDone = true;
1264 break;
1265 }
1266 }
1267
1268#ifndef VBOX_WITH_REM
1269 /*
1270         * Check every so often whether we can get back to fast execution mode.
1271 */
1272 if (!(++cLoops & 7))
1273 {
1274 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1275 if ( enmCheck != EMSTATE_REM
1276 && enmCheck != EMSTATE_IEM_THEN_REM)
1277 return VINF_EM_RESCHEDULE;
1278 }
1279#endif
1280
1281 } /* The Inner Loop, recompiled execution mode version. */
1282
1283
1284#ifdef VBOX_WITH_REM
1285 /*
1286 * Returning. Sync back the VM state if required.
1287 */
1288 if (fInREMState)
1289 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1290#endif
1291
1292 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1293 return rc;
1294}
1295
1296
1297#ifdef DEBUG
1298
1299int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1300{
1301 EMSTATE enmOldState = pVCpu->em.s.enmState;
1302
1303 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1304
1305 Log(("Single step BEGIN:\n"));
1306 for (uint32_t i = 0; i < cIterations; i++)
1307 {
1308 DBGFR3PrgStep(pVCpu);
1309 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1310 emR3RemStep(pVM, pVCpu);
1311 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1312 break;
1313 }
1314 Log(("Single step END:\n"));
1315 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1316 pVCpu->em.s.enmState = enmOldState;
1317 return VINF_EM_RESCHEDULE;
1318}
1319
1320#endif /* DEBUG */
1321
1322
1323/**
1324 * Try to execute the problematic code in IEM first, then fall back on REM if
1325 * there is too much of it or if IEM doesn't implement something.
1326 *
1327 * @returns Strict VBox status code from IEMExecLots.
1328 * @param pVM The cross context VM structure.
1329 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1330 * @param pfFFDone Force flags done indicator.
1331 *
1332 * @thread EMT(pVCpu)
1333 */
1334static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1335{
1336 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1337 *pfFFDone = false;
1338
1339 /*
1340 * Execute in IEM for a while.
1341 */
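    /* Note: the 1024 limit below caps how many instructions we let IEM chew
       through (accumulated since entering IEM_THEN_REM) before giving up and
       switching to REM proper. */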
1342 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1343 {
1344 uint32_t cInstructions;
1345 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
1346 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1347 if (rcStrict != VINF_SUCCESS)
1348 {
1349 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1350 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1351 break;
1352
1353 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1354 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1355 return rcStrict;
1356 }
1357
1358 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1359 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1360 {
1361 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1362 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1363 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1364 pVCpu->em.s.enmState = enmNewState;
1365 return VINF_SUCCESS;
1366 }
1367
1368 /*
1369 * Check for pending actions.
1370 */
1371 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1372 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1373 return VINF_SUCCESS;
1374 }
1375
1376 /*
1377 * Switch to REM.
1378 */
1379 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1380 pVCpu->em.s.enmState = EMSTATE_REM;
1381 return VINF_SUCCESS;
1382}
1383
1384
1385/**
1386 * Decides whether to execute RAW, HM, NEM, IEM/REM or IEM-then-REM.
1387 *
1388 * @returns new EM state
1389 * @param pVM The cross context VM structure.
1390 * @param pVCpu The cross context virtual CPU structure.
1391 * @param pCtx Pointer to the guest CPU context.
1392 */
1393EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1394{
1395 /*
1396 * When forcing raw-mode execution, things are simple.
1397 */
1398 if (pVCpu->em.s.fForceRAW)
1399 return EMSTATE_RAW;
1400
1401 /*
1402 * We stay in the wait for SIPI state unless explicitly told otherwise.
1403 */
1404 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1405 return EMSTATE_WAIT_SIPI;
1406
1407 /*
1408 * Execute everything in IEM?
1409 */
1410 if (pVM->em.s.fIemExecutesAll)
1411 return EMSTATE_IEM;
1412
1413 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1414 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1415 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1416
1417 X86EFLAGS EFlags = pCtx->eflags;
1418 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1419 {
1420 if (EMIsHwVirtExecutionEnabled(pVM))
1421 {
1422 if (VM_IS_HM_ENABLED(pVM))
1423 {
1424 if (HMR3CanExecuteGuest(pVM, pCtx))
1425 return EMSTATE_HM;
1426 }
1427 else if (NEMR3CanExecuteGuest(pVM, pVCpu, pCtx))
1428 return EMSTATE_NEM;
1429
1430 /*
1431 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1432 * turns off monitoring features essential for raw mode!
1433 */
1434 return EMSTATE_IEM_THEN_REM;
1435 }
1436 }
1437
1438 /*
1439 * Standard raw-mode:
1440 *
1441     * Here we only support 16 & 32-bit protected mode ring-3 code without I/O
1442     * privileges, or 32-bit protected mode ring-0 code.
1443 *
1444 * The tests are ordered by the likelihood of being true during normal execution.
1445 */
1446 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1447 {
1448 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1449 return EMSTATE_REM;
1450 }
1451
1452# ifndef VBOX_RAW_V86
1453 if (EFlags.u32 & X86_EFL_VM) {
1454 Log2(("raw mode refused: VM_MASK\n"));
1455 return EMSTATE_REM;
1456 }
1457# endif
1458
1459 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1460 uint32_t u32CR0 = pCtx->cr0;
1461 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1462 {
1463 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1464 return EMSTATE_REM;
1465 }
1466
1467 if (pCtx->cr4 & X86_CR4_PAE)
1468 {
1469 uint32_t u32Dummy, u32Features;
1470
1471 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1472 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1473 return EMSTATE_REM;
1474 }
1475
1476 unsigned uSS = pCtx->ss.Sel;
1477 if ( pCtx->eflags.Bits.u1VM
1478 || (uSS & X86_SEL_RPL) == 3)
1479 {
1480 if (!EMIsRawRing3Enabled(pVM))
1481 return EMSTATE_REM;
1482
1483 if (!(EFlags.u32 & X86_EFL_IF))
1484 {
1485 Log2(("raw mode refused: IF (RawR3)\n"));
1486 return EMSTATE_REM;
1487 }
1488
1489 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1490 {
1491 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1492 return EMSTATE_REM;
1493 }
1494 }
1495 else
1496 {
1497 if (!EMIsRawRing0Enabled(pVM))
1498 return EMSTATE_REM;
1499
1500 if (EMIsRawRing1Enabled(pVM))
1501 {
1502 /* Only ring 0 and 1 supervisor code. */
1503 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1504 {
1505 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1506 return EMSTATE_REM;
1507 }
1508 }
1509 /* Only ring 0 supervisor code. */
1510 else if ((uSS & X86_SEL_RPL) != 0)
1511 {
1512 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1513 return EMSTATE_REM;
1514 }
1515
1516        // Let's start with pure 32-bit ring-0 code first.
1517 /** @todo What's pure 32-bit mode? flat? */
1518 if ( !(pCtx->ss.Attr.n.u1DefBig)
1519 || !(pCtx->cs.Attr.n.u1DefBig))
1520 {
1521 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1522 return EMSTATE_REM;
1523 }
1524
1525 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1526 if (!(u32CR0 & X86_CR0_WP))
1527 {
1528 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1529 return EMSTATE_REM;
1530 }
1531
1532# ifdef VBOX_WITH_RAW_MODE
1533 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1534 {
1535 Log2(("raw r0 mode forced: patch code\n"));
1536# ifdef VBOX_WITH_SAFE_STR
1537 Assert(pCtx->tr.Sel);
1538# endif
1539 return EMSTATE_RAW;
1540 }
1541# endif /* VBOX_WITH_RAW_MODE */
1542
1543# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1544 if (!(EFlags.u32 & X86_EFL_IF))
1545 {
1546 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1547 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1548 return EMSTATE_REM;
1549 }
1550# endif
1551
1552# ifndef VBOX_WITH_RAW_RING1
1553 /** @todo still necessary??? */
1554 if (EFlags.Bits.u2IOPL != 0)
1555 {
1556 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1557 return EMSTATE_REM;
1558 }
1559# endif
1560 }
1561
1562 /*
1563     * Stale hidden selectors mean raw-mode is unsafe (we're being very careful here).
1564 */
1565 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1566 {
1567 Log2(("raw mode refused: stale CS\n"));
1568 return EMSTATE_REM;
1569 }
1570 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1571 {
1572 Log2(("raw mode refused: stale SS\n"));
1573 return EMSTATE_REM;
1574 }
1575 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1576 {
1577 Log2(("raw mode refused: stale DS\n"));
1578 return EMSTATE_REM;
1579 }
1580 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1581 {
1582 Log2(("raw mode refused: stale ES\n"));
1583 return EMSTATE_REM;
1584 }
1585 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1586 {
1587 Log2(("raw mode refused: stale FS\n"));
1588 return EMSTATE_REM;
1589 }
1590 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1591 {
1592 Log2(("raw mode refused: stale GS\n"));
1593 return EMSTATE_REM;
1594 }
1595
1596# ifdef VBOX_WITH_SAFE_STR
1597 if (pCtx->tr.Sel == 0)
1598 {
1599 Log(("Raw mode refused -> TR=0\n"));
1600 return EMSTATE_REM;
1601 }
1602# endif
1603
1604 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1605 return EMSTATE_RAW;
1606}
1607
1608
1609/**
1610 * Executes all high priority post-execution forced actions.
1611 *
1612 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1613 * fatal error status code.
1614 *
1615 * @param pVM The cross context VM structure.
1616 * @param pVCpu The cross context virtual CPU structure.
1617 * @param rc The current strict VBox status code rc.
1618 */
1619VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1620{
1621 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1622
1623 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1624 PDMCritSectBothFF(pVCpu);
1625
1626 /* Update CR3 (Nested Paging case for HM). */
1627 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1628 {
1629 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1630 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1631 if (RT_FAILURE(rc2))
1632 return rc2;
1633 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1634 }
1635
1636 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1637 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1638 {
1639 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1640 if (CPUMIsGuestInPAEMode(pVCpu))
1641 {
1642 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1643 AssertPtr(pPdpes);
1644
1645 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1646 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1647 }
1648 else
1649 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1650 }
1651
1652 /* IEM has pending work (typically memory write after INS instruction). */
1653 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
1654 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1655
1656    /* IOM has pending work (committing an I/O or MMIO write). */
1657 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
1658 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1659
1660#ifdef VBOX_WITH_RAW_MODE
1661 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1662 CSAMR3DoPendingAction(pVM, pVCpu);
1663#endif
1664
1665 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1666 {
1667 if ( rc > VINF_EM_NO_MEMORY
1668 && rc <= VINF_EM_LAST)
1669 rc = VINF_EM_NO_MEMORY;
1670 }
1671
1672 return rc;
1673}
1674
1675#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1676/**
1677 * Helper for emR3ForcedActions() for injecting interrupts into the
1678 * nested-guest.
1679 *
1680 * @returns VBox status code.
1681 * @param pVCpu The cross context virtual CPU structure.
1682 * @param pCtx Pointer to the nested-guest CPU context.
1683 * @param pfResched Where to store whether a reschedule is required.
1684 * @param pfInject Where to store whether an interrupt was injected (and if
1685 * a wake up is pending).
1686 */
1687static int emR3NstGstInjectIntr(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfResched, bool *pfInject)
1688{
1689 *pfResched = false;
1690 *pfInject = false;
1691 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1692 {
1693 PVM pVM = pVCpu->CTX_SUFF(pVM);
1694 Assert(pCtx->hwvirt.fGif);
1695 bool fVirtualGif = CPUMGetSvmNstGstVGif(pCtx);
1696#ifdef VBOX_WITH_RAW_MODE
1697 fVirtualGif &= !PATMIsPatchGCAddr(pVM, pCtx->eip);
1698#endif
1699 if (fVirtualGif)
1700 {
1701 if (CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
1702 {
1703 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1704 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1705 {
1706 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
1707 {
1708 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1709 if (RT_SUCCESS(rcStrict))
1710 {
1711 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1712 * doesn't intercept HLT but intercepts INTR? */
1713 *pfResched = true;
1714 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1715 if (rcStrict == VINF_SVM_VMEXIT)
1716 return VINF_SUCCESS;
1717 return VBOXSTRICTRC_VAL(rcStrict);
1718 }
1719
1720 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1721 return VINF_EM_TRIPLE_FAULT;
1722 }
1723
1724 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1725 /** @todo this really isn't nice, should properly handle this */
1726 int rc = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1727 Assert(rc != VINF_PGM_CHANGE_MODE);
1728 if (rc == VINF_SVM_VMEXIT)
1729 rc = VINF_SUCCESS;
1730 if (pVM->em.s.fIemExecutesAll && ( rc == VINF_EM_RESCHEDULE_REM
1731 || rc == VINF_EM_RESCHEDULE_HM
1732 || rc == VINF_EM_RESCHEDULE_RAW))
1733 {
1734 rc = VINF_EM_RESCHEDULE;
1735 }
1736
1737 *pfResched = true;
1738 *pfInject = true;
1739 return rc;
1740 }
1741 }
1742
1743 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1744 && CPUMCanSvmNstGstTakeVirtIntr(pVCpu, pCtx))
1745 {
1746 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
1747 {
1748 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1749 if (RT_SUCCESS(rcStrict))
1750 {
1751 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1752 * doesn't intercept HLT but intercepts VINTR? */
1753 *pfResched = true;
1754 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1755 if (rcStrict == VINF_SVM_VMEXIT)
1756 return VINF_SUCCESS;
1757 return VBOXSTRICTRC_VAL(rcStrict);
1758 }
1759
1760 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1761 return VINF_EM_TRIPLE_FAULT;
1762 }
1763
1764 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1765 uint8_t const uNstGstVector = CPUMGetSvmNstGstInterrupt(pCtx);
1766 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR vector %#x\n", uNstGstVector));
1767 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1768 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1769
1770 *pfResched = true;
1771 *pfInject = true;
1772 return VINF_EM_RESCHEDULE;
1773 }
1774 }
1775 return VINF_SUCCESS;
1776 }
1777
1778 if (CPUMIsGuestInVmxNestedHwVirtMode(pCtx))
1779 { /** @todo Nested VMX. */ }
1780
1781 /* Shouldn't really get here. */
1782 AssertMsgFailed(("Unrecognized nested hwvirt. arch!\n"));
1783 return VERR_EM_INTERNAL_ERROR;
1784}
1785#endif
1786
1787/**
1788 * Executes all pending forced actions.
1789 *
1790 * Forced actions can cause execution delays and execution
1791 * rescheduling. The first we deal with using action priority, so
1792 * that for instance pending timers aren't scheduled and run until
1793 * right before execution. The rescheduling we deal with using
1794 * return codes. The same goes for VM termination, only in that case
1795 * we exit everything.
1796 *
1797 * @returns VBox status code of equal or greater importance/severity than rc.
1798 * The most important ones are: VINF_EM_RESCHEDULE,
1799 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1800 *
1801 * @param pVM The cross context VM structure.
1802 * @param pVCpu The cross context virtual CPU structure.
1803 * @param rc The current rc.
1804 *
1805 */
1806int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1807{
1808 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1809#ifdef VBOX_STRICT
1810 int rcIrq = VINF_SUCCESS;
1811#endif
1812 int rc2;
1813#define UPDATE_RC() \
1814 do { \
1815 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1816 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1817 break; \
1818 if (!rc || rc2 < rc) \
1819 rc = rc2; \
1820 } while (0)
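    /* A worked example of the UPDATE_RC() merge rule: the numerically lowest
       non-zero status wins, i.e. the more important EM status code is kept.
       A pending rc of VINF_EM_SUSPEND is thus not displaced by an rc2 of
       VINF_EM_RESCHEDULE, while an rc2 of VINF_SUCCESS never downgrades
       anything. */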
1821 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1822
1823 /*
1824 * Post execution chunk first.
1825 */
1826 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1827 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1828 {
1829 /*
1830 * EMT Rendezvous (must be serviced before termination).
1831 */
1832 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1833 {
1834 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1835 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1836 UPDATE_RC();
1837 /** @todo HACK ALERT! The following test is to make sure EM+TM
1838 * thinks the VM is stopped/reset before the next VM state change
1839 * is made. We need a better solution for this, or at least make it
1840 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1841 * VINF_EM_SUSPEND). */
1842 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1843 {
1844 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1845 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1846 return rc;
1847 }
1848 }
1849
1850 /*
1851 * State change request (cleared by vmR3SetStateLocked).
1852 */
1853 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1854 {
1855 VMSTATE enmState = VMR3GetState(pVM);
1856 switch (enmState)
1857 {
1858 case VMSTATE_FATAL_ERROR:
1859 case VMSTATE_FATAL_ERROR_LS:
1860 case VMSTATE_GURU_MEDITATION:
1861 case VMSTATE_GURU_MEDITATION_LS:
1862 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1863 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1864 return VINF_EM_SUSPEND;
1865
1866 case VMSTATE_DESTROYING:
1867 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1868 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1869 return VINF_EM_TERMINATE;
1870
1871 default:
1872 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1873 }
1874 }
1875
1876 /*
1877 * Debugger Facility polling.
1878 */
1879 if ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
1880 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
1881 {
1882 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1883 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1884 UPDATE_RC();
1885 }
1886
1887 /*
1888 * Postponed reset request.
1889 */
1890 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1891 {
1892 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1893 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1894 UPDATE_RC();
1895 }
1896
1897#ifdef VBOX_WITH_RAW_MODE
1898 /*
1899 * CSAM page scanning.
1900 */
1901 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1902 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1903 {
1904 /** @todo check for 16 or 32 bits code! (D bit in the code selector) */
1905 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1906 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1907 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1908 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
1909 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1910 }
1911#endif
1912
1913 /*
1914 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1915 */
1916 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1917 {
1918 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1919 UPDATE_RC();
1920 if (rc == VINF_EM_NO_MEMORY)
1921 return rc;
1922 }
1923
1924 /* check that we got them all */
1925 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1926 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
1927 }
1928
1929 /*
1930 * Normal priority then.
1931 * (Executed in no particular order.)
1932 */
1933 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1934 {
1935 /*
1936 * PDM Queues are pending.
1937 */
1938 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1939 PDMR3QueueFlushAll(pVM);
1940
1941 /*
1942 * PDM DMA transfers are pending.
1943 */
1944 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1945 PDMR3DmaRun(pVM);
1946
1947 /*
1948 * EMT Rendezvous (make sure they are handled before the requests).
1949 */
1950 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1951 {
1952 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1953 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1954 UPDATE_RC();
1955 /** @todo HACK ALERT! The following test is to make sure EM+TM
1956 * thinks the VM is stopped/reset before the next VM state change
1957 * is made. We need a better solution for this, or at least make it
1958 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1959 * VINF_EM_SUSPEND). */
1960 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1961 {
1962 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1963 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1964 return rc;
1965 }
1966 }
1967
1968 /*
1969 * Requests from other threads.
1970 */
1971 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1972 {
1973 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1974 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1975 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1976 {
1977 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1978 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1979 return rc2;
1980 }
1981 UPDATE_RC();
1982 /** @todo HACK ALERT! The following test is to make sure EM+TM
1983 * thinks the VM is stopped/reset before the next VM state change
1984 * is made. We need a better solution for this, or at least make it
1985 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1986 * VINF_EM_SUSPEND). */
1987 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1988 {
1989 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1990 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1991 return rc;
1992 }
1993 }
1994
1995#ifdef VBOX_WITH_REM
1996 /* Replay the handler notification changes. */
1997 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1998 {
1999 /* Try not to cause deadlocks. */
2000 if ( pVM->cCpus == 1
2001 || ( !PGMIsLockOwner(pVM)
2002 && !IOMIsLockWriteOwner(pVM))
2003 )
2004 {
2005 EMRemLock(pVM);
2006 REMR3ReplayHandlerNotifications(pVM);
2007 EMRemUnlock(pVM);
2008 }
2009 }
2010#endif
2011
2012 /* check that we got them all */
2013 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
2014 }
2015
2016 /*
2017 * Normal priority then. (per-VCPU)
2018 * (Executed in no particular order.)
2019 */
2020 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2021 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
2022 {
2023 /*
2024 * Requests from other threads.
2025 */
2026 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
2027 {
2028 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2029 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
2030 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
2031 {
2032 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2033 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2034 return rc2;
2035 }
2036 UPDATE_RC();
2037 /** @todo HACK ALERT! The following test is to make sure EM+TM
2038 * thinks the VM is stopped/reset before the next VM state change
2039 * is made. We need a better solution for this, or at least make it
2040 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2041 * VINF_EM_SUSPEND). */
2042 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2043 {
2044 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2045 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2046 return rc;
2047 }
2048 }
2049
2050 /* check that we got them all */
2051 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
2052 }
2053
2054 /*
2055 * High priority pre execution chunk last.
2056 * (Executed in ascending priority order.)
2057 */
2058 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
2059 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
2060 {
2061 /*
2062 * Timers before interrupts.
2063 */
2064 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
2065 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2066 TMR3TimerQueuesDo(pVM);
2067
2068 /*
2069 * Pick up asynchronously posted interrupts into the APIC.
2070 */
2071 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2072 APICUpdatePendingInterrupts(pVCpu);
2073
2074 /*
2075 * The instruction following an emulated STI should *always* be executed!
2076 *
2077         * Note! We intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if
2078 * the eip is the same as the inhibited instr address. Before we
2079 * are able to execute this instruction in raw mode (iret to
2080 * guest code) an external interrupt might force a world switch
2081 * again. Possibly allowing a guest interrupt to be dispatched
2082 * in the process. This could break the guest. Sounds very
2083         * unlikely, but such timing-sensitive problems are not as rare as
2084         * you might think.
2085 */
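        /* Classic example: a guest doing STI; HLT relies on this one
           instruction interrupt shadow so that the HLT is reached before any
           interrupt can be dispatched. */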
2086 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2087 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2088 {
2089 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
2090 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
2091 {
2092 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
2093 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2094 }
2095 else
2096 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
2097 }
2098
2099 /*
2100 * Interrupts.
2101 */
2102 bool fWakeupPending = false;
2103 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2104 && (!rc || rc >= VINF_EM_RESCHEDULE_HM))
2105 {
2106 if ( !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2107 && !TRPMHasTrap(pVCpu)) /* an interrupt could already be scheduled for dispatching in the recompiler. */
2108 {
2109 Assert(!HMR3IsEventPending(pVCpu));
2110 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2111#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2112 if (CPUMIsGuestInNestedHwVirtMode(pCtx))
2113 {
2114 bool fResched, fInject;
2115 rc2 = emR3NstGstInjectIntr(pVCpu, pCtx, &fResched, &fInject);
2116 if (fInject)
2117 {
2118 fWakeupPending = true;
2119# ifdef VBOX_STRICT
2120 rcIrq = rc2;
2121# endif
2122 }
2123 if (fResched)
2124 UPDATE_RC();
2125 }
2126 else
2127#endif
2128 {
2129 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
2130 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2131#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2132 && pCtx->hwvirt.fGif
2133#endif
2134#ifdef VBOX_WITH_RAW_MODE
2135 && !PATMIsPatchGCAddr(pVM, pCtx->eip)
2136#endif
2137 && pCtx->eflags.Bits.u1IF)
2138 {
2139 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2140 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
2141 /** @todo this really isn't nice, should properly handle this */
2142 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2143 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
2144 Log(("EM: TRPMR3InjectEvent -> %d\n", rc2));
2145 if (pVM->em.s.fIemExecutesAll && ( rc2 == VINF_EM_RESCHEDULE_REM
2146 || rc2 == VINF_EM_RESCHEDULE_HM
2147 || rc2 == VINF_EM_RESCHEDULE_RAW))
2148 {
2149 rc2 = VINF_EM_RESCHEDULE;
2150 }
2151#ifdef VBOX_STRICT
2152 rcIrq = rc2;
2153#endif
2154 UPDATE_RC();
2155 /* Reschedule required: We must not miss the wakeup below! */
2156 fWakeupPending = true;
2157 }
2158 }
2159 }
2160 }
2161
2162 /*
2163 * Allocate handy pages.
2164 */
2165 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2166 {
2167 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2168 UPDATE_RC();
2169 }
2170
2171 /*
2172 * Debugger Facility request.
2173 */
2174 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2175 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2176 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
2177 {
2178 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2179 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2180 UPDATE_RC();
2181 }
2182
2183 /*
2184 * EMT Rendezvous (must be serviced before termination).
2185 */
2186 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2187 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2188 {
2189 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2190 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2191 UPDATE_RC();
2192 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2193 * stopped/reset before the next VM state change is made. We need a better
2194             * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2195             * && rc <= VINF_EM_SUSPEND). */
2196 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2197 {
2198 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2199 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2200 return rc;
2201 }
2202 }
2203
2204 /*
2205 * State change request (cleared by vmR3SetStateLocked).
2206 */
2207 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2208 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2209 {
2210 VMSTATE enmState = VMR3GetState(pVM);
2211 switch (enmState)
2212 {
2213 case VMSTATE_FATAL_ERROR:
2214 case VMSTATE_FATAL_ERROR_LS:
2215 case VMSTATE_GURU_MEDITATION:
2216 case VMSTATE_GURU_MEDITATION_LS:
2217 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2218 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2219 return VINF_EM_SUSPEND;
2220
2221 case VMSTATE_DESTROYING:
2222 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2223 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2224 return VINF_EM_TERMINATE;
2225
2226 default:
2227 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2228 }
2229 }
2230
2231 /*
2232 * Out of memory? Since most of our fellow high priority actions may cause us
2233 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2234 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2235 * than us since we can terminate without allocating more memory.
2236 */
2237 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2238 {
2239 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2240 UPDATE_RC();
2241 if (rc == VINF_EM_NO_MEMORY)
2242 return rc;
2243 }
2244
2245 /*
2246 * If the virtual sync clock is still stopped, make TM restart it.
2247 */
2248 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2249 TMR3VirtualSyncFF(pVM, pVCpu);
2250
2251#ifdef DEBUG
2252 /*
2253 * Debug, pause the VM.
2254 */
2255 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2256 {
2257 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2258 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2259 return VINF_EM_SUSPEND;
2260 }
2261#endif
2262
2263 /* check that we got them all */
2264 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2265 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2266 }
2267
2268#undef UPDATE_RC
2269 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2270 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2271 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2272 return rc;
2273}
2274
2275
2276/**
2277 * Check if the preset execution time cap restricts guest execution scheduling.
2278 *
2279 * @returns true if execution is allowed, false otherwise.
2280 * @param pVM The cross context VM structure.
2281 * @param pVCpu The cross context virtual CPU structure.
2282 */
2283bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2284{
2285 uint64_t u64UserTime, u64KernelTime;
2286
2287 if ( pVM->uCpuExecutionCap != 100
2288 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2289 {
2290 uint64_t u64TimeNow = RTTimeMilliTS();
2291 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2292 {
2293 /* New time slice. */
2294 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2295 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2296 pVCpu->em.s.u64TimeSliceExec = 0;
2297 }
2298 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
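        /* Worked example (assuming the usual EM_TIME_SLICE of 100 ms): with
           uCpuExecutionCap = 50 the EMT may burn at most 50 ms of combined
           kernel+user time per slice before the check below fails and the
           caller throttles execution. */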
2299
2300 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2301 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2302 return false;
2303 }
2304 return true;
2305}
2306
2307
2308/**
2309 * Execute VM.
2310 *
2311 * This function is the main loop of the VM. The emulation thread
2312 * calls this function when the VM has been successfully constructed
2313 * and we're ready for executing the VM.
2314 *
2315 * Returning from this function means that the VM is turned off or
2316 * suspended (state already saved) and deconstruction is next in line.
2317 *
2318 * All interaction from other threads is done using forced actions
2319 * and signaling of the wait object.
2320 *
2321 * @returns VBox status code, informational status codes may indicate failure.
2322 * @param pVM The cross context VM structure.
2323 * @param pVCpu The cross context virtual CPU structure.
2324 */
2325VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2326{
2327 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2328 pVM,
2329 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2330 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2331 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2332 pVCpu->em.s.fForceRAW));
2333 VM_ASSERT_EMT(pVM);
2334 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2335 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2336 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2337 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2338
2339 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
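    /* Note: setjmp returns 0 on the direct call; a fatal error path can
       longjmp back here (see the FatalLongJump member) with a non-zero status
       code, skipping the execution loop below. */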
2340 if (rc == 0)
2341 {
2342 /*
2343 * Start the virtual time.
2344 */
2345 TMR3NotifyResume(pVM, pVCpu);
2346
2347 /*
2348 * The Outer Main Loop.
2349 */
2350 bool fFFDone = false;
2351
2352 /* Reschedule right away to start in the right state. */
2353 rc = VINF_SUCCESS;
2354
2355 /* If resuming after a pause or a state load, restore the previous
2356 state or else we'll start executing code. Else, just reschedule. */
2357 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2358 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2359 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2360 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2361 else
2362 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2363 pVCpu->em.s.cIemThenRemInstructions = 0;
2364 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2365
2366 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2367 for (;;)
2368 {
2369 /*
2370 * Before we can schedule anything (we're here because
2371 * scheduling is required) we must service any pending
2372             * forced actions to avoid one of them causing immediate
2373             * rescheduling upon entering an inner loop.
2374 *
2375 * Do forced actions.
2376 */
2377 if ( !fFFDone
2378 && RT_SUCCESS(rc)
2379 && rc != VINF_EM_TERMINATE
2380 && rc != VINF_EM_OFF
2381 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2382 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2383 {
2384 rc = emR3ForcedActions(pVM, pVCpu, rc);
2385 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2386 if ( ( rc == VINF_EM_RESCHEDULE_REM
2387 || rc == VINF_EM_RESCHEDULE_HM)
2388 && pVCpu->em.s.fForceRAW)
2389 rc = VINF_EM_RESCHEDULE_RAW;
2390 }
2391 else if (fFFDone)
2392 fFFDone = false;
2393
2394 /*
2395 * Now what to do?
2396 */
2397 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2398 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2399 switch (rc)
2400 {
2401 /*
2402 * Keep doing what we're currently doing.
2403 */
2404 case VINF_SUCCESS:
2405 break;
2406
2407 /*
2408 * Reschedule - to raw-mode execution.
2409 */
2410/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2411 case VINF_EM_RESCHEDULE_RAW:
2412 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2413 if (VM_IS_RAW_MODE_ENABLED(pVM))
2414 {
2415 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2416 pVCpu->em.s.enmState = EMSTATE_RAW;
2417 }
2418 else
2419 {
2420 AssertLogRelFailed();
2421 pVCpu->em.s.enmState = EMSTATE_NONE;
2422 }
2423 break;
2424
2425 /*
2426 * Reschedule - to HM or NEM.
2427 */
2428 case VINF_EM_RESCHEDULE_HM:
2429 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2430 Assert(!pVCpu->em.s.fForceRAW);
2431 if (VM_IS_HM_ENABLED(pVM))
2432 {
2433 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2434 pVCpu->em.s.enmState = EMSTATE_HM;
2435 }
2436 else if (VM_IS_NEM_ENABLED(pVM))
2437 {
2438 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2439 pVCpu->em.s.enmState = EMSTATE_NEM;
2440 }
2441 else
2442 {
2443 AssertLogRelFailed();
2444 pVCpu->em.s.enmState = EMSTATE_NONE;
2445 }
2446 break;
2447
2448 /*
2449 * Reschedule - to recompiled execution.
2450 */
2451 case VINF_EM_RESCHEDULE_REM:
2452 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2453 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2454 {
2455 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2456 enmOldState, EMSTATE_IEM_THEN_REM));
2457 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2458 {
2459 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2460 pVCpu->em.s.cIemThenRemInstructions = 0;
2461 }
2462 }
2463 else
2464 {
2465 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2466 pVCpu->em.s.enmState = EMSTATE_REM;
2467 }
2468 break;
2469
2470 /*
2471 * Resume.
2472 */
2473 case VINF_EM_RESUME:
2474 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2475 /* Don't reschedule in the halted or wait for SIPI case. */
2476 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2477 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2478 {
2479 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2480 break;
2481 }
2482 /* fall through and get scheduled. */
2483 RT_FALL_THRU();
2484
2485 /*
2486 * Reschedule.
2487 */
2488 case VINF_EM_RESCHEDULE:
2489 {
2490 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2491 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2492 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2493 pVCpu->em.s.cIemThenRemInstructions = 0;
2494 pVCpu->em.s.enmState = enmState;
2495 break;
2496 }
2497
2498 /*
2499 * Halted.
2500 */
2501 case VINF_EM_HALT:
2502 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2503 pVCpu->em.s.enmState = EMSTATE_HALTED;
2504 break;
2505
2506 /*
2507 * Switch to the wait for SIPI state (application processor only)
2508 */
2509 case VINF_EM_WAIT_SIPI:
2510 Assert(pVCpu->idCpu != 0);
2511 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2512 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2513 break;
2514
2515
2516 /*
2517 * Suspend.
2518 */
2519 case VINF_EM_SUSPEND:
2520 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2521 Assert(enmOldState != EMSTATE_SUSPENDED);
2522 pVCpu->em.s.enmPrevState = enmOldState;
2523 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2524 break;
2525
2526 /*
2527 * Reset.
2528             * We might end up doing a double reset for now; we'll have to clean up the mess later.
2529 */
2530 case VINF_EM_RESET:
2531 {
2532 if (pVCpu->idCpu == 0)
2533 {
2534 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2535 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2536 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2537 pVCpu->em.s.cIemThenRemInstructions = 0;
2538 pVCpu->em.s.enmState = enmState;
2539 }
2540 else
2541 {
2542 /* All other VCPUs go into the wait for SIPI state. */
2543 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2544 }
2545 break;
2546 }
2547
2548 /*
2549 * Power Off.
2550 */
2551 case VINF_EM_OFF:
2552 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2553 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2554 TMR3NotifySuspend(pVM, pVCpu);
2555 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2556 return rc;
2557
2558 /*
2559 * Terminate the VM.
2560 */
2561 case VINF_EM_TERMINATE:
2562 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2563 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2564 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2565 TMR3NotifySuspend(pVM, pVCpu);
2566 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2567 return rc;
2568
2569
2570 /*
2571 * Out of memory, suspend the VM and stuff.
2572 */
2573 case VINF_EM_NO_MEMORY:
2574 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2575 Assert(enmOldState != EMSTATE_SUSPENDED);
2576 pVCpu->em.s.enmPrevState = enmOldState;
2577 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2578 TMR3NotifySuspend(pVM, pVCpu);
2579 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2580
2581 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2582 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2583 if (rc != VINF_EM_SUSPEND)
2584 {
2585 if (RT_SUCCESS_NP(rc))
2586 {
2587 AssertLogRelMsgFailed(("%Rrc\n", rc));
2588 rc = VERR_EM_INTERNAL_ERROR;
2589 }
2590 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2591 }
2592 return rc;
2593
2594 /*
2595 * Guest debug events.
2596 */
2597 case VINF_EM_DBG_STEPPED:
2598 case VINF_EM_DBG_STOP:
2599 case VINF_EM_DBG_EVENT:
2600 case VINF_EM_DBG_BREAKPOINT:
2601 case VINF_EM_DBG_STEP:
2602 if (enmOldState == EMSTATE_RAW)
2603 {
2604 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2605 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2606 }
2607 else if (enmOldState == EMSTATE_HM)
2608 {
2609 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2610 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2611 }
2612 else if (enmOldState == EMSTATE_NEM)
2613 {
2614 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2615 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2616 }
2617 else if (enmOldState == EMSTATE_REM)
2618 {
2619 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2620 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2621 }
2622 else
2623 {
2624 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2625 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2626 }
2627 break;
2628
2629 /*
2630 * Hypervisor debug events.
2631 */
2632 case VINF_EM_DBG_HYPER_STEPPED:
2633 case VINF_EM_DBG_HYPER_BREAKPOINT:
2634 case VINF_EM_DBG_HYPER_ASSERTION:
2635 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2636 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2637 break;
2638
2639 /*
2640 * Triple fault.
2641 */
2642 case VINF_EM_TRIPLE_FAULT:
2643 if (!pVM->em.s.fGuruOnTripleFault)
2644 {
2645 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2646 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2647 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2648 continue;
2649 }
2650 /* Else fall through and trigger a guru. */
2651 RT_FALL_THRU();
2652
2653 case VERR_VMM_RING0_ASSERTION:
2654 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2655 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2656 break;
2657
2658 /*
2659                  * Any error code showing up here other than the ones we
2660                  * know and process above is considered to be FATAL.
2661 *
2662 * Unknown warnings and informational status codes are also
2663 * included in this.
2664 */
2665 default:
2666 if (RT_SUCCESS_NP(rc))
2667 {
2668 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2669 rc = VERR_EM_INTERNAL_ERROR;
2670 }
2671 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2672 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2673 break;
2674 }
2675
2676 /*
2677 * Act on state transition.
2678 */
2679 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2680 if (enmOldState != enmNewState)
2681 {
2682 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2683
2684 /* Clear MWait flags and the unhalt FF. */
2685 if ( enmOldState == EMSTATE_HALTED
2686 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2687 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2688 && ( enmNewState == EMSTATE_RAW
2689 || enmNewState == EMSTATE_HM
2690 || enmNewState == EMSTATE_NEM
2691 || enmNewState == EMSTATE_REM
2692 || enmNewState == EMSTATE_IEM_THEN_REM
2693 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2694 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2695 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2696 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2697 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2698 {
2699 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2700 {
2701 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2702 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2703 }
2704 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2705 {
2706 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2707 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2708 }
2709 }
2710 }
2711 else
2712 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2713
2714 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2715 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2716
2717 /*
2718 * Act on the new state.
2719 */
2720 switch (enmNewState)
2721 {
2722 /*
2723 * Execute raw.
2724 */
2725 case EMSTATE_RAW:
2726#ifdef VBOX_WITH_RAW_MODE
2727 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2728#else
2729 AssertLogRelMsgFailed(("%Rrc\n", rc));
2730 rc = VERR_EM_INTERNAL_ERROR;
2731#endif
2732 break;
2733
2734 /*
2735 * Execute hardware accelerated raw.
2736 */
2737 case EMSTATE_HM:
2738 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2739 break;
2740
2741                 /*
2742                  * Execute via the native execution manager (NEM).
2743                  */
2744 case EMSTATE_NEM:
2745 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2746 break;
2747
2748 /*
2749 * Execute recompiled.
2750 */
2751 case EMSTATE_REM:
2752 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2753 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2754 break;
2755
2756 /*
2757 * Execute in the interpreter.
2758 */
2759 case EMSTATE_IEM:
2760 {
2761#if 0 /* For testing purposes. */
2762 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2763 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2764 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2765 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2766 rc = VINF_SUCCESS;
2767 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2768#endif
2769 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
2770 if (pVM->em.s.fIemExecutesAll)
2771 {
2772 Assert(rc != VINF_EM_RESCHEDULE_REM);
2773 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2774 Assert(rc != VINF_EM_RESCHEDULE_HM);
2775 }
2776 fFFDone = false;
2777 break;
2778 }
2779
2780 /*
2781                  * Execute in IEM, hoping we can quickly switch back to HM
2782 * or RAW execution. If our hopes fail, we go to REM.
2783 */
2784 case EMSTATE_IEM_THEN_REM:
2785 {
2786 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2787 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2788 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2789 break;
2790 }
2791
2792 /*
2793 * Application processor execution halted until SIPI.
2794 */
2795 case EMSTATE_WAIT_SIPI:
2796 /* no break */
2797 /*
2798 * hlt - execution halted until interrupt.
2799 */
2800 case EMSTATE_HALTED:
2801 {
2802 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2803                /* If HM (or someone else) stores a pending interrupt in
2804                   TRPM, it must be dispatched ASAP without any halting.
2805                   Anything pending in TRPM has been accepted and the CPU
2806                   should already be in the right state to receive it. */
2807 if (TRPMHasTrap(pVCpu))
2808 rc = VINF_EM_RESCHEDULE;
2809 /* MWAIT has a special extension where it's woken up when
2810 an interrupt is pending even when IF=0. */
2811 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2812 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2813 {
2814 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2815 if (rc == VINF_SUCCESS)
2816 {
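                        /* Pick up any interrupts posted to the virtual APIC while we were
                           halted, so that the pending-interrupt FF check below sees them. */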
2817 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2818 APICUpdatePendingInterrupts(pVCpu);
2819
2820 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2821 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2822 {
2823 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2824 rc = VINF_EM_RESCHEDULE;
2825 }
2826 }
2827 }
2828 else
2829 {
2830 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2831                    /* We're only interested in NMIs/SMIs here, which have their own FFs, so we don't need to
2832 check VMCPU_FF_UPDATE_APIC here. */
2833 if ( rc == VINF_SUCCESS
2834 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2835 {
2836 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2837 rc = VINF_EM_RESCHEDULE;
2838 }
2839 }
2840
2841 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2842 break;
2843 }
2844
2845 /*
2846 * Suspended - return to VM.cpp.
2847 */
2848 case EMSTATE_SUSPENDED:
2849 TMR3NotifySuspend(pVM, pVCpu);
2850 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2851 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2852 return VINF_EM_SUSPEND;
2853
2854 /*
2855 * Debugging in the guest.
2856 */
2857 case EMSTATE_DEBUG_GUEST_RAW:
2858 case EMSTATE_DEBUG_GUEST_HM:
2859 case EMSTATE_DEBUG_GUEST_NEM:
2860 case EMSTATE_DEBUG_GUEST_IEM:
2861 case EMSTATE_DEBUG_GUEST_REM:
2862 TMR3NotifySuspend(pVM, pVCpu);
2863 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2864 TMR3NotifyResume(pVM, pVCpu);
2865 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2866 break;
2867
2868 /*
2869 * Debugging in the hypervisor.
2870 */
2871 case EMSTATE_DEBUG_HYPER:
2872 {
2873 TMR3NotifySuspend(pVM, pVCpu);
2874 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2875
2876 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2877 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2878 if (rc != VINF_SUCCESS)
2879 {
2880 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2881 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2882 else
2883 {
2884 /* switch to guru meditation mode */
2885 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2886 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2887 VMMR3FatalDump(pVM, pVCpu, rc);
2888 }
2889 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2890 return rc;
2891 }
2892
2893 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2894 TMR3NotifyResume(pVM, pVCpu);
2895 break;
2896 }
2897
2898 /*
2899 * Guru meditation takes place in the debugger.
2900 */
2901 case EMSTATE_GURU_MEDITATION:
2902 {
2903 TMR3NotifySuspend(pVM, pVCpu);
2904 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2905 VMMR3FatalDump(pVM, pVCpu, rc);
2906 emR3Debug(pVM, pVCpu, rc);
2907 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2908 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2909 return rc;
2910 }
2911
2912 /*
2913 * The states we don't expect here.
2914 */
2915 case EMSTATE_NONE:
2916 case EMSTATE_TERMINATING:
2917 default:
2918 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2919 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2920 TMR3NotifySuspend(pVM, pVCpu);
2921 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2922 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2923 return VERR_EM_INTERNAL_ERROR;
2924 }
2925 } /* The Outer Main Loop */
2926 }
2927 else
2928 {
2929 /*
2930 * Fatal error.
2931 */
2932 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2933 TMR3NotifySuspend(pVM, pVCpu);
2934 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2935 VMMR3FatalDump(pVM, pVCpu, rc);
2936 emR3Debug(pVM, pVCpu, rc);
2937 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2938 /** @todo change the VM state! */
2939 return rc;
2940 }
2941
2942 /* not reached */
2943}
2944
2945/**
2946 * Notify EM of a suspend state change (used by FTM).
2947 *
2948 * @param pVM The cross context VM structure.
2949 */
2950VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
2951{
2952 PVMCPU pVCpu = VMMGetCpu(pVM);
2953
2954 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
2955 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
2956 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2957 return VINF_SUCCESS;
2958}
2959
2960/**
2961 * Notify EM of a resume state change (used by FTM).
2962 *
2963 * @param pVM The cross context VM structure.
2964 */
2965VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
2966{
2967 PVMCPU pVCpu = VMMGetCpu(pVM);
2968 EMSTATE enmCurState = pVCpu->em.s.enmState;
2969
2970 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
2971 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2972 pVCpu->em.s.enmPrevState = enmCurState;
2973 return VINF_SUCCESS;
2974}
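
/*
 * A hypothetical usage sketch of the suspend/resume pair above: FTM is assumed
 * to bracket its state synchronization roughly like this, where ftmR3SyncState
 * is an invented placeholder, not an actual VBox API:
 *
 *     EMR3NotifySuspend(pVM);         // park EM in EMSTATE_SUSPENDED, stop the virtual clocks
 *     int rc = ftmR3SyncState(pVM);   // placeholder for FTM's actual synchronization work
 *     EMR3NotifyResume(pVM);          // restore the previous EM state and resume the clocks
 */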