VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@72552

Last change on this file since 72552 was 72490, checked in by vboxsync, 7 years ago

NEM,EM: Generic optimization of I/O port accesses that have to be executed in ring-3. Only NEM uses the feature for now. bugref:9044 bugref:9193

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 139.1 KB
/* $Id: EM.cpp 72490 2018-06-09 15:11:13Z vboxsync $ */
/** @file
 * EM - Execution Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_em EM - The Execution Monitor / Manager
 *
 * The Execution Monitor/Manager is responsible for running the VM, scheduling
 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
 * Interpreted), and keeping the CPU states in sync. The function
 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
 * emR3RemExecute).
 *
 * The interpreted execution is only used to avoid switching between
 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
 * The interpretation is thus implemented as part of EM.
 *
 * @see grp_em
 */
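
/*
 * Editor's sketch (not part of the original file): the scheduling described
 * above amounts to a per-VCPU state machine. A much simplified outline of the
 * dispatch inside EMR3ExecuteVM() could look like the following; the inner
 * loop names and the emR3RemExecute() signature are taken from this file, but
 * the control flow shown here is an illustration only:
 *
 * @code
 *     bool fFFDone = false;
 *     for (;;)
 *     {
 *         int rcLoop;
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_RAW: rcLoop = emR3RawExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_HM:  rcLoop = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
 *             case EMSTATE_REM: rcLoop = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             default:          rcLoop = VERR_EM_INTERNAL_ERROR;               break;
 *         }
 *         // The status code and any pending forced actions (FFs) then decide
 *         // the next state, e.g. VINF_EM_RESCHEDULE_REM -> EMSTATE_REM.
 *     }
 * @endcode
 */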


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
#include <VBox/vmm/em.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include <VBox/vmm/apic.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmqueue.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/patm.h>
#include "EMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include "VMMTracing.h"

#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/stream.h>
#include <iprt/thread.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
#define EM_NOTIFY_HM
#endif


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
static const char *emR3GetStateName(EMSTATE enmState);
#endif
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
#if defined(VBOX_WITH_REM) || defined(DEBUG)
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
#endif
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);

/**
 * Initializes the EM.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
{
    LogFlow(("EMR3Init\n"));
    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, em.s, 32);
    AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
    AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));

    /*
     * Init the structure.
     */
    pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
    PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
    PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");

    bool fEnabled;
    int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileUser = !fEnabled;

    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileSupervisor = !fEnabled;

#ifdef VBOX_WITH_RAW_RING1
    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
    AssertLogRelRCReturn(rc, rc);
#else
    pVM->fRawRing1Enabled = false; /* Disabled by default. */
#endif

    rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
    AssertLogRelRCReturn(rc, rc);

    rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
    AssertLogRelRCReturn(rc, rc);
    pVM->em.s.fGuruOnTripleFault = !fEnabled;
    if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
    {
        LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
        pVM->em.s.fGuruOnTripleFault = true;
    }

    LogRel(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
            pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
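
    /*
     * Editor's note: the CFGM keys queried above live in the VM configuration
     * tree ("RawR3Enabled"/"RawR0Enabled" at the root, "IemExecutesAll" and
     * "TripleFaultReset" under the "EM" child node). For experiments they can
     * usually be set from the host side via the documented VBoxInternal
     * extradata mapping; treat the exact key paths here as an assumption and
     * verify against the CFGM documentation before relying on them:
     *
     * @code
     *     VBoxManage setextradata "MyVM" VBoxInternal/EM/IemExecutesAll 1
     *     VBoxManage setextradata "MyVM" VBoxInternal/EM/TripleFaultReset 1
     * @endcode
     */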

#ifdef VBOX_WITH_REM
    /*
     * Initialize the REM critical section.
     */
    AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
    rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
    AssertRCReturn(rc, rc);
#endif

    /*
     * Saved state.
     */
    rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
                               NULL, NULL, NULL,
                               NULL, emR3Save, NULL,
                               NULL, emR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
        pVCpu->em.s.enmPrevState = EMSTATE_NONE;
        pVCpu->em.s.fForceRAW = false;

        pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
#ifdef VBOX_WITH_RAW_MODE
        if (VM_IS_RAW_MODE_ENABLED(pVM))
        {
            pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
            AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
        }
#endif

        /* Force reset of the time slice. */
        pVCpu->em.s.u64TimeSliceStart = 0;

# define EM_REG_COUNTER(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_COUNTER_USED(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE_ADV(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

        /*
         * Statistics.
         */
#ifdef VBOX_WITH_STATISTICS
        PEMSTATS pStats;
        rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->em.s.pStatsR3 = pStats;
        pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
        pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);

        EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
        EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");

        EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
        EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");

        EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
        EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");

        EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
        EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
        EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");

        EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
        pVCpu->em.s.pCliStatTree = 0;

        /* These should be considered for release statistics. */
        EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
        EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%d/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%d/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%d/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
        EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%d/EM/NEMEnter", "Profiling NEM entry overhead.");
#endif /* VBOX_WITH_STATISTICS */
        EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%d/EM/NEMExec", "Profiling NEM execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%d/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
#ifdef VBOX_WITH_STATISTICS
        EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
#endif /* VBOX_WITH_STATISTICS */

        EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
        EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
        EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");

        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
    }

    emR3InitDbg(pVM);
    return VINF_SUCCESS;
}


/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
{
    LogFlow(("EMR3Relocate\n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        if (pVCpu->em.s.pStatsR3)
            pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
    }
}


/**
 * Reset the EM state for a CPU.
 *
 * Called by EMR3Reset and hot plugging.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
{
    /* Reset scheduling state. */
    pVCpu->em.s.fForceRAW = false;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);

    /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
       out of the HALTED state here so that enmPrevState doesn't end up as
       HALTED when EMR3Execute returns. */
    if (pVCpu->em.s.enmState == EMSTATE_HALTED)
    {
        Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
        pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
    }
}


/**
 * Reset notification.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
{
    Log(("EMR3Reset: \n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        EMR3ResetCpu(&pVM->aCpus[i]);
}


/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources;
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
{
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

#ifdef VBOX_WITH_REM
    PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
#else
    RT_NOREF(pVM);
#endif
    return VINF_SUCCESS;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);

        Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
        Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
        SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);

        /* Save mwait state. */
        SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
        SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
        SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
        SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
        SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
        int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    /*
     * Validate version.
     */
    if (   uVersion > EM_SAVED_STATE_VERSION
        || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
    {
        AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Load the saved state.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;
        AssertRCReturn(rc, rc);

        if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
        {
            AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
            rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
            AssertRCReturn(rc, rc);
            Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);

            pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
        }
        if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
        {
            /* Load mwait state. */
            rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
            AssertRCReturn(rc, rc);
        }

        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return VINF_SUCCESS;
}


/**
 * Argument packet for emR3SetExecutionPolicy.
 */
struct EMR3SETEXECPOLICYARGS
{
    EMEXECPOLICY enmPolicy;
    bool fEnforce;
};


/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
 */
static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /*
     * Only the first CPU changes the variables.
     */
    if (pVCpu->idCpu == 0)
    {
        struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
        switch (pArgs->enmPolicy)
        {
            case EMEXECPOLICY_RECOMPILE_RING0:
                pVM->fRecompileSupervisor = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_RECOMPILE_RING3:
                pVM->fRecompileUser = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_IEM_ALL:
                pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
                break;
            default:
                AssertFailedReturn(VERR_INVALID_PARAMETER);
        }
        LogRel(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
                pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
    }

    /*
     * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
     */
    return pVCpu->em.s.enmState == EMSTATE_RAW
        || pVCpu->em.s.enmState == EMSTATE_HM
        || pVCpu->em.s.enmState == EMSTATE_NEM
        || pVCpu->em.s.enmState == EMSTATE_IEM
        || pVCpu->em.s.enmState == EMSTATE_REM
        || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
         ? VINF_EM_RESCHEDULE
         : VINF_SUCCESS;
}


/**
 * Changes an execution scheduling policy parameter.
 *
 * This is used to enable or disable raw-mode / hardware-virtualization
 * execution of user and supervisor code.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to change.
 * @param   fEnforce    Whether to enforce the policy or not.
 */
VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);

    struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
    return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
}
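
/*
 * Editor's sketch (not part of the original file): a front end holding a valid
 * user-mode VM handle could force all guest code through IEM like this. Error
 * handling is minimal and obtaining pUVM is assumed to have happened elsewhere:
 *
 * @code
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true /*fEnforce*/);
 *     if (RT_SUCCESS(rc))
 *         LogRel(("EM: all guest instructions will now go through IEM\n"));
 * @endcode
 */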


/**
 * Queries an execution scheduling policy parameter.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to query.
 * @param   pfEnforced  Where to return the current value.
 */
VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
{
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /* No need to bother EMTs with a query. */
    switch (enmPolicy)
    {
        case EMEXECPOLICY_RECOMPILE_RING0:
            *pfEnforced = pVM->fRecompileSupervisor;
            break;
        case EMEXECPOLICY_RECOMPILE_RING3:
            *pfEnforced = pVM->fRecompileUser;
            break;
        case EMEXECPOLICY_IEM_ALL:
            *pfEnforced = pVM->em.s.fIemExecutesAll;
            break;
        default:
            AssertFailedReturn(VERR_INTERNAL_ERROR_2);
    }

    return VINF_SUCCESS;
}


/**
 * Queries the main execution engine of the VM.
 *
 * @returns VBox status code.
 * @param   pUVM                    The user mode VM handle.
 * @param   pbMainExecutionEngine   Where to return the result, VM_EXEC_ENGINE_XXX.
 */
VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
{
    AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
    *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;

    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    *pbMainExecutionEngine = pVM->bMainExecutionEngine;
    return VINF_SUCCESS;
}
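
/*
 * Editor's sketch (not part of the original file): callers typically compare
 * the returned byte against the VM_EXEC_ENGINE_XXX constants from vm.h.
 * VM_EXEC_ENGINE_NATIVE_API is assumed here to be one of those values:
 *
 * @code
 *     uint8_t bEngine = VM_EXEC_ENGINE_NOT_SET;
 *     if (   RT_SUCCESS(EMR3QueryMainExecutionEngine(pUVM, &bEngine))
 *         && bEngine == VM_EXEC_ENGINE_NATIVE_API)
 *         LogRel(("EM: running on a native execution API (NEM)\n"));
 * @endcode
 */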


/**
 * Raise a fatal error.
 *
 * Safely terminate the VM with full state report and stuff. This function
 * will naturally never return.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
}


#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
/**
 * Gets the EM state name.
 *
 * @returns Pointer to a read-only state name.
 * @param   enmState    The state.
 */
static const char *emR3GetStateName(EMSTATE enmState)
{
    switch (enmState)
    {
        case EMSTATE_NONE:              return "EMSTATE_NONE";
        case EMSTATE_RAW:               return "EMSTATE_RAW";
        case EMSTATE_HM:                return "EMSTATE_HM";
        case EMSTATE_IEM:               return "EMSTATE_IEM";
        case EMSTATE_REM:               return "EMSTATE_REM";
        case EMSTATE_HALTED:            return "EMSTATE_HALTED";
        case EMSTATE_WAIT_SIPI:         return "EMSTATE_WAIT_SIPI";
        case EMSTATE_SUSPENDED:         return "EMSTATE_SUSPENDED";
        case EMSTATE_TERMINATING:       return "EMSTATE_TERMINATING";
        case EMSTATE_DEBUG_GUEST_RAW:   return "EMSTATE_DEBUG_GUEST_RAW";
        case EMSTATE_DEBUG_GUEST_HM:    return "EMSTATE_DEBUG_GUEST_HM";
        case EMSTATE_DEBUG_GUEST_IEM:   return "EMSTATE_DEBUG_GUEST_IEM";
        case EMSTATE_DEBUG_GUEST_REM:   return "EMSTATE_DEBUG_GUEST_REM";
        case EMSTATE_DEBUG_HYPER:       return "EMSTATE_DEBUG_HYPER";
        case EMSTATE_GURU_MEDITATION:   return "EMSTATE_GURU_MEDITATION";
        case EMSTATE_IEM_THEN_REM:      return "EMSTATE_IEM_THEN_REM";
        case EMSTATE_NEM:               return "EMSTATE_NEM";
        case EMSTATE_DEBUG_GUEST_NEM:   return "EMSTATE_DEBUG_GUEST_NEM";
        default:                        return "Unknown!";
    }
}
#endif /* LOG_ENABLED || VBOX_STRICT */


/**
 * Handle pending ring-3 I/O port write.
 *
 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
{
    CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);

    /* Get and clear the pending data. */
    RTIOPORT const uPort   = pVCpu->em.s.PendingIoPortAccess.uPort;
    uint32_t const uValue  = pVCpu->em.s.PendingIoPortAccess.uValue;
    uint8_t const  cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
    uint8_t const  cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
    pVCpu->em.s.PendingIoPortAccess.cbValue = 0;

    /* Assert sanity: the value must fit in the given access size. */
    switch (cbValue)
    {
        case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
        case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
        case 4: break;
        default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
    }
    AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);

    /* Do the work. */
    VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
    LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
    if (IOM_SUCCESS(rcStrict))
    {
        pVCpu->cpum.GstCtx.rip += cbInstr;
        pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
    }
    return rcStrict;
}
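
/*
 * Editor's note (not part of the original file): this is one half of the
 * deferred I/O optimization mentioned in the commit message. The ring-0 /
 * raw-mode side records the access and returns VINF_EM_PENDING_R3_IOPORT_WRITE
 * instead of completing the OUT itself; the exact EMRZSetPendingIoPortWrite()
 * signature is an assumption based on the fields consumed above:
 *
 * @code
 *     // Ring-0 exit handler, deferring the port write to ring-3:
 *     return EMRZSetPendingIoPortWrite(pVCpu, uPort, cbInstr, cbValue, uValue);
 *     // EMR3ExecuteVM() then calls emR3ExecutePendingIoPortWrite(), which
 *     // performs IOMIOPortWrite() and advances RIP past the instruction.
 * @endcode
 */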


/**
 * Handle pending ring-3 I/O port read.
 *
 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
{
    CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);

    /* Get and clear the pending data. */
    RTIOPORT const uPort   = pVCpu->em.s.PendingIoPortAccess.uPort;
    uint8_t const  cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
    uint8_t const  cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
    pVCpu->em.s.PendingIoPortAccess.cbValue = 0;

    /* Assert sanity. */
    switch (cbValue)
    {
        case 1: break;
        case 2: break;
        case 4: break;
        default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
    }
    AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* 'READ' */, VERR_EM_INTERNAL_ERROR);
    AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);

    /* Do the work. */
    uint32_t uValue = 0;
    VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
    LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
    if (IOM_SUCCESS(rcStrict))
    {
        if (cbValue == 4)
            pVCpu->cpum.GstCtx.rax = uValue;
        else if (cbValue == 2)
            pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
        else
            pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
        pVCpu->cpum.GstCtx.rip += cbInstr;
        pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
    }
    return rcStrict;
}


/**
 * Debug loop.
 *
 * @returns VBox status code for EM.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      Current EM VBox status code.
 */
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
        const VBOXSTRICTRC rcLast = rc;

        /*
         * Debug related RC.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (   pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    || pVCpu->em.s.fForceRAW /* paranoia */)
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawStep(pVM, pVCpu);
#else
                    AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
#endif
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
                    rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
                    rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
#ifdef VBOX_WITH_REM
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
                    rc = emR3RemStep(pVM, pVCpu);
#endif
                else
                {
                    rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
                    if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
                        rc = VINF_EM_DBG_STEPPED;
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_EVENT:
                rc = DBGFR3EventHandlePending(pVM, pVCpu);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                RTLogFlush(NULL);
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;
            case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special errors code! */
            {
                LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
            }
        }

        /*
         * Process the result.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Continue the debugging loop.
             */
            case VINF_EM_DBG_STEP:
            case VINF_EM_DBG_STOP:
            case VINF_EM_DBG_EVENT:
            case VINF_EM_DBG_STEPPED:
            case VINF_EM_DBG_BREAKPOINT:
            case VINF_EM_DBG_HYPER_STEPPED:
            case VINF_EM_DBG_HYPER_BREAKPOINT:
            case VINF_EM_DBG_HYPER_ASSERTION:
                break;

            /*
             * Resuming execution (in some form) has to be done here if we got
             * a hypervisor debug event.
             */
            case VINF_SUCCESS:
            case VINF_EM_RESUME:
            case VINF_EM_SUSPEND:
            case VINF_EM_RESCHEDULE:
            case VINF_EM_RESCHEDULE_RAW:
            case VINF_EM_RESCHEDULE_REM:
            case VINF_EM_HALT:
                if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                {
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawResumeHyper(pVM, pVCpu);
                    if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                        continue;
#else
                    AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
#endif
                }
                if (rc == VINF_SUCCESS)
                    rc = VINF_EM_RESCHEDULE;
                return rc;

            /*
             * The debugger isn't attached.
             * We'll simply turn the thing off since that's the easiest thing to do.
             */
            case VERR_DBGF_NOT_ATTACHED:
                switch (VBOXSTRICTRC_VAL(rcLast))
                {
                    case VINF_EM_DBG_HYPER_STEPPED:
                    case VINF_EM_DBG_HYPER_BREAKPOINT:
                    case VINF_EM_DBG_HYPER_ASSERTION:
                    case VERR_TRPM_PANIC:
                    case VERR_TRPM_DONT_PANIC:
                    case VERR_VMM_RING0_ASSERTION:
                    case VERR_VMM_HYPER_CR3_MISMATCH:
                    case VERR_VMM_RING3_CALL_DISABLED:
                        return rcLast;
                }
                return VINF_EM_OFF;

            /*
             * Status codes terminating the VM in one or another sense.
             */
            case VINF_EM_TERMINATE:
            case VINF_EM_OFF:
            case VINF_EM_RESET:
            case VINF_EM_NO_MEMORY:
            case VINF_EM_RAW_STALE_SELECTOR:
            case VINF_EM_RAW_IRET_TRAP:
            case VERR_TRPM_PANIC:
            case VERR_TRPM_DONT_PANIC:
            case VERR_IEM_INSTR_NOT_IMPLEMENTED:
            case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
            case VERR_VMM_RING0_ASSERTION:
            case VERR_VMM_HYPER_CR3_MISMATCH:
            case VERR_VMM_RING3_CALL_DISABLED:
            case VERR_INTERNAL_ERROR:
            case VERR_INTERNAL_ERROR_2:
            case VERR_INTERNAL_ERROR_3:
            case VERR_INTERNAL_ERROR_4:
            case VERR_INTERNAL_ERROR_5:
            case VERR_IPE_UNEXPECTED_STATUS:
            case VERR_IPE_UNEXPECTED_INFO_STATUS:
            case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                return rc;

            /*
             * The rest is unexpected, and will keep us here.
             */
            default:
                AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
                break;
        }
    } /* debug forever */
}
1112
1113
1114#if defined(VBOX_WITH_REM) || defined(DEBUG)
1115/**
1116 * Steps recompiled code.
1117 *
1118 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1119 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1120 *
1121 * @param pVM The cross context VM structure.
1122 * @param pVCpu The cross context virtual CPU structure.
1123 */
1124static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1125{
1126 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1127
1128# ifdef VBOX_WITH_REM
1129 EMRemLock(pVM);
1130
1131 /*
1132 * Switch to REM, step instruction, switch back.
1133 */
1134 int rc = REMR3State(pVM, pVCpu);
1135 if (RT_SUCCESS(rc))
1136 {
1137 rc = REMR3Step(pVM, pVCpu);
1138 REMR3StateBack(pVM, pVCpu);
1139 }
1140 EMRemUnlock(pVM);
1141
1142# else
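/* Without the recompiler in this build, step exactly one instruction via IEM instead. */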
1143 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1144# endif
1145
1146 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1147 return rc;
1148}
1149#endif /* VBOX_WITH_REM || DEBUG */
1150
1151
1152#ifdef VBOX_WITH_REM
1153/**
1154 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
1155 * critical section.
1156 *
1157 * @returns false - new fInREMState value.
1158 * @param pVM The cross context VM structure.
1159 * @param pVCpu The cross context virtual CPU structure.
1160 */
1161DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1162{
1163 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1164 REMR3StateBack(pVM, pVCpu);
1165 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1166
1167 EMRemUnlock(pVM);
1168 return false;
1169}
1170#endif
1171
1172
1173/**
1174 * Executes recompiled code.
1175 *
1176 * This function contains the recompiler version of the inner
1177 * execution loop (the outer loop being in EMR3ExecuteVM()).
1178 *
1179 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1180 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1181 *
1182 * @param pVM The cross context VM structure.
1183 * @param pVCpu The cross context virtual CPU structure.
1184 * @param pfFFDone Where to store an indicator telling whether or not
1185 * FFs were done before returning.
1186 *
1187 */
1188static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1189{
1190#ifdef LOG_ENABLED
1191 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1192 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1193
1194 if (pCtx->eflags.Bits.u1VM)
1195 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
1196 else
1197 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
1198#endif
1199 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1200
1201#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1202 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1203 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1204 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1205#endif
1206
1207 /*
1208 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1209 * or the REM suggests raw-mode execution.
1210 */
1211 *pfFFDone = false;
1212#ifdef VBOX_WITH_REM
1213 bool fInREMState = false;
1214#else
1215 uint32_t cLoops = 0;
1216#endif
1217 int rc = VINF_SUCCESS;
1218 for (;;)
1219 {
1220#ifdef VBOX_WITH_REM
1221 /*
1222 * Lock REM and update the state if not already in sync.
1223 *
1224 * Note! Big lock, but you are not supposed to own any lock when
1225 * coming in here.
1226 */
1227 if (!fInREMState)
1228 {
1229 EMRemLock(pVM);
1230 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1231
1232 /* Flush the recompiler translation blocks if the VCPU has changed,
1233 and force a full CPU state resync. */
1234 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1235 {
1236 REMFlushTBs(pVM);
1237 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1238 }
1239 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1240
1241 rc = REMR3State(pVM, pVCpu);
1242
1243 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1244 if (RT_FAILURE(rc))
1245 break;
1246 fInREMState = true;
1247
1248 /*
1249 * We might have missed the raising of VMREQ, TIMER and some other
1250 * important FFs while we were busy switching the state. So, check again.
1251 */
1252 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1253 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1254 {
1255 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1256 goto l_REMDoForcedActions;
1257 }
1258 }
1259#endif
1260
1261 /*
1262 * Execute REM.
1263 */
1264 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1265 {
1266 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1267#ifdef VBOX_WITH_REM
1268 rc = REMR3Run(pVM, pVCpu);
1269#else
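/* No recompiler in this build: let IEM execute a batch of instructions instead. */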
1270 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
1271#endif
1272 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1273 }
1274 else
1275 {
1276 /* Give up this time slice; virtual time continues */
1277 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1278 RTThreadSleep(5);
1279 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1280 rc = VINF_SUCCESS;
1281 }
1282
1283 /*
1284 * Deal with high priority post execution FFs before doing anything
1285 * else. Sync back the state and leave the lock to be on the safe side.
1286 */
1287 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1288 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1289 {
1290#ifdef VBOX_WITH_REM
1291 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1292#endif
1293 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1294 }
1295
1296 /*
1297 * Process the returned status code.
1298 */
1299 if (rc != VINF_SUCCESS)
1300 {
1301 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1302 break;
1303 if (rc != VINF_REM_INTERRUPED_FF)
1304 {
1305#ifndef VBOX_WITH_REM
1306 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1307 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1308 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1309 {
1310 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1311 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1312 {
1313 rc = VINF_EM_RESCHEDULE;
1314 break;
1315 }
1316 }
1317#endif
1318
1319 /*
1320 * Anything which is not known to us means an internal error
1321 * and the termination of the VM!
1322 */
1323 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1324 break;
1325 }
1326 }
1327
1328
1329 /*
1330 * Check and execute forced actions.
1331 *
1332 * Sync back the VM state and leave the lock before calling any of
1333 * these; you never know what's going to happen here.
1334 */
1335#ifdef VBOX_HIGH_RES_TIMERS_HACK
1336 TMTimerPollVoid(pVM, pVCpu);
1337#endif
1338 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1339 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1340 || VMCPU_FF_IS_PENDING(pVCpu,
1341 VMCPU_FF_ALL_REM_MASK
1342 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1343 {
1344#ifdef VBOX_WITH_REM
1345l_REMDoForcedActions:
1346 if (fInREMState)
1347 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1348#endif
1349 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1350 rc = emR3ForcedActions(pVM, pVCpu, rc);
1351 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1352 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1353 if ( rc != VINF_SUCCESS
1354 && rc != VINF_EM_RESCHEDULE_REM)
1355 {
1356 *pfFFDone = true;
1357 break;
1358 }
1359 }
1360
1361#ifndef VBOX_WITH_REM
1362 /*
1363 * Have to check every so often (every 8th iteration here) whether we can get back to fast execution mode.
1364 */
1365 if (!(++cLoops & 7))
1366 {
1367 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1368 if ( enmCheck != EMSTATE_REM
1369 && enmCheck != EMSTATE_IEM_THEN_REM)
1370 return VINF_EM_RESCHEDULE;
1371 }
1372#endif
1373
1374 } /* The Inner Loop, recompiled execution mode version. */
1375
1376
1377#ifdef VBOX_WITH_REM
1378 /*
1379 * Returning. Sync back the VM state if required.
1380 */
1381 if (fInREMState)
1382 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1383#endif
1384
1385 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1386 return rc;
1387}
1388
1389
1390#ifdef DEBUG
1391
1392int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1393{
1394 EMSTATE enmOldState = pVCpu->em.s.enmState;
1395
1396 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1397
1398 Log(("Single step BEGIN:\n"));
1399 for (uint32_t i = 0; i < cIterations; i++)
1400 {
1401 DBGFR3PrgStep(pVCpu);
1402 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1403 emR3RemStep(pVM, pVCpu);
1404 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1405 break;
1406 }
1407 Log(("Single step END:\n"));
1408 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1409 pVCpu->em.s.enmState = enmOldState;
1410 return VINF_EM_RESCHEDULE;
1411}
1412
1413#endif /* DEBUG */
1414
1415
1416/**
1417 * Try to execute the problematic code in IEM first, then fall back on REM if there
1418 * is too much of it or if IEM doesn't implement something.
1419 *
1420 * @returns Strict VBox status code from IEMExecLots.
1421 * @param pVM The cross context VM structure.
1422 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1423 * @param pfFFDone Force flags done indicator.
1424 *
1425 * @thread EMT(pVCpu)
1426 */
1427static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1428{
1429 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1430 *pfFFDone = false;
1431
1432 /*
1433 * Execute in IEM for a while.
1434 */
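/* The 1024-instruction budget below is a heuristic cut-off: if the troublesome
   code hasn't cleared up by then, recompiled execution is presumably cheaper. */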
1435 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1436 {
1437 uint32_t cInstructions;
1438 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
1439 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1440 if (rcStrict != VINF_SUCCESS)
1441 {
1442 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1443 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1444 break;
1445
1446 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1447 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1448 return rcStrict;
1449 }
1450
1451 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1452 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1453 {
1454 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1455 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1456 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1457 pVCpu->em.s.enmState = enmNewState;
1458 return VINF_SUCCESS;
1459 }
1460
1461 /*
1462 * Check for pending actions.
1463 */
1464 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1465 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1466 return VINF_SUCCESS;
1467 }
1468
1469 /*
1470 * Switch to REM.
1471 */
1472 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1473 pVCpu->em.s.enmState = EMSTATE_REM;
1474 return VINF_SUCCESS;
1475}
1476
1477
1478/**
1479 * Decides whether to execute raw-mode, HM, NEM, IEM or REM.
1480 *
1481 * @returns new EM state
1482 * @param pVM The cross context VM structure.
1483 * @param pVCpu The cross context virtual CPU structure.
1484 * @param pCtx Pointer to the guest CPU context.
1485 */
1486EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1487{
1488 /*
1489 * When forcing raw-mode execution, things are simple.
1490 */
1491 if (pVCpu->em.s.fForceRAW)
1492 return EMSTATE_RAW;
1493
1494 /*
1495 * We stay in the wait for SIPI state unless explicitly told otherwise.
1496 */
1497 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1498 return EMSTATE_WAIT_SIPI;
1499
1500 /*
1501 * Execute everything in IEM?
1502 */
1503 if (pVM->em.s.fIemExecutesAll)
1504 return EMSTATE_IEM;
1505
1506 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1507 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1508 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1509
1510 X86EFLAGS EFlags = pCtx->eflags;
1511 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1512 {
1513 if (EMIsHwVirtExecutionEnabled(pVM))
1514 {
1515 if (VM_IS_HM_ENABLED(pVM))
1516 {
1517 if (HMR3CanExecuteGuest(pVM, pCtx))
1518 return EMSTATE_HM;
1519 }
1520 else if (NEMR3CanExecuteGuest(pVM, pVCpu, pCtx))
1521 return EMSTATE_NEM;
1522
1523 /*
1524 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1525 * turns off monitoring features essential for raw mode!
1526 */
1527 return EMSTATE_IEM_THEN_REM;
1528 }
1529 }
1530
1531 /*
1532 * Standard raw-mode:
1533 *
1534 * Here we only support 16 and 32-bit protected mode ring-3 code with no I/O privileges,
1535 * or 32-bit protected mode ring-0 code.
1536 *
1537 * The tests are ordered by the likelihood of being true during normal execution.
1538 */
1539 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1540 {
1541 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1542 return EMSTATE_REM;
1543 }
1544
1545# ifndef VBOX_RAW_V86
1546 if (EFlags.u32 & X86_EFL_VM) {
1547 Log2(("raw mode refused: VM_MASK\n"));
1548 return EMSTATE_REM;
1549 }
1550# endif
1551
1552 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1553 uint32_t u32CR0 = pCtx->cr0;
1554 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1555 {
1556 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1557 return EMSTATE_REM;
1558 }
1559
1560 if (pCtx->cr4 & X86_CR4_PAE)
1561 {
1562 uint32_t u32Dummy, u32Features;
1563
1564 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1565 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1566 return EMSTATE_REM;
1567 }
1568
1569 unsigned uSS = pCtx->ss.Sel;
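/* In protected mode SS.RPL equals the CPL, so it is used below to tell guest
   ring-3 (and V86) code apart from supervisor code. */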
1570 if ( pCtx->eflags.Bits.u1VM
1571 || (uSS & X86_SEL_RPL) == 3)
1572 {
1573 if (!EMIsRawRing3Enabled(pVM))
1574 return EMSTATE_REM;
1575
1576 if (!(EFlags.u32 & X86_EFL_IF))
1577 {
1578 Log2(("raw mode refused: IF (RawR3)\n"));
1579 return EMSTATE_REM;
1580 }
1581
1582 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1583 {
1584 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1585 return EMSTATE_REM;
1586 }
1587 }
1588 else
1589 {
1590 if (!EMIsRawRing0Enabled(pVM))
1591 return EMSTATE_REM;
1592
1593 if (EMIsRawRing1Enabled(pVM))
1594 {
1595 /* Only ring 0 and 1 supervisor code. */
1596 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1597 {
1598 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1599 return EMSTATE_REM;
1600 }
1601 }
1602 /* Only ring 0 supervisor code. */
1603 else if ((uSS & X86_SEL_RPL) != 0)
1604 {
1605 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1606 return EMSTATE_REM;
1607 }
1608
1609 // Let's start with pure 32-bit ring-0 code first
1610 /** @todo What's pure 32-bit mode? flat? */
1611 if ( !(pCtx->ss.Attr.n.u1DefBig)
1612 || !(pCtx->cs.Attr.n.u1DefBig))
1613 {
1614 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1615 return EMSTATE_REM;
1616 }
1617
1618 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1619 if (!(u32CR0 & X86_CR0_WP))
1620 {
1621 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1622 return EMSTATE_REM;
1623 }
1624
1625# ifdef VBOX_WITH_RAW_MODE
1626 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1627 {
1628 Log2(("raw r0 mode forced: patch code\n"));
1629# ifdef VBOX_WITH_SAFE_STR
1630 Assert(pCtx->tr.Sel);
1631# endif
1632 return EMSTATE_RAW;
1633 }
1634# endif /* VBOX_WITH_RAW_MODE */
1635
1636# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1637 if (!(EFlags.u32 & X86_EFL_IF))
1638 {
1639 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1640 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1641 return EMSTATE_REM;
1642 }
1643# endif
1644
1645# ifndef VBOX_WITH_RAW_RING1
1646 /** @todo still necessary??? */
1647 if (EFlags.Bits.u2IOPL != 0)
1648 {
1649 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1650 return EMSTATE_REM;
1651 }
1652# endif
1653 }
1654
1655 /*
1656 * Stale hidden selectors mean raw-mode is unsafe (being very careful).
1657 */
1658 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1659 {
1660 Log2(("raw mode refused: stale CS\n"));
1661 return EMSTATE_REM;
1662 }
1663 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1664 {
1665 Log2(("raw mode refused: stale SS\n"));
1666 return EMSTATE_REM;
1667 }
1668 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1669 {
1670 Log2(("raw mode refused: stale DS\n"));
1671 return EMSTATE_REM;
1672 }
1673 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1674 {
1675 Log2(("raw mode refused: stale ES\n"));
1676 return EMSTATE_REM;
1677 }
1678 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1679 {
1680 Log2(("raw mode refused: stale FS\n"));
1681 return EMSTATE_REM;
1682 }
1683 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1684 {
1685 Log2(("raw mode refused: stale GS\n"));
1686 return EMSTATE_REM;
1687 }
1688
1689# ifdef VBOX_WITH_SAFE_STR
1690 if (pCtx->tr.Sel == 0)
1691 {
1692 Log(("Raw mode refused -> TR=0\n"));
1693 return EMSTATE_REM;
1694 }
1695# endif
1696
1697 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1698 return EMSTATE_RAW;
1699}
1700
1701
1702/**
1703 * Executes all high priority post execution force actions.
1704 *
1705 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1706 * fatal error status code.
1707 *
1708 * @param pVM The cross context VM structure.
1709 * @param pVCpu The cross context virtual CPU structure.
1710 * @param rc The current strict VBox status code rc.
1711 */
1712VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1713{
1714 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1715
1716 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1717 PDMCritSectBothFF(pVCpu);
1718
1719 /* Update CR3 (Nested Paging case for HM). */
1720 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1721 {
1722 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1723 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1724 if (RT_FAILURE(rc2))
1725 return rc2;
1726 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1727 }
1728
1729 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and is used only by the HM nested-paging case. */
1730 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1731 {
1732 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1733 if (CPUMIsGuestInPAEMode(pVCpu))
1734 {
1735 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1736 AssertPtr(pPdpes);
1737
1738 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1739 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1740 }
1741 else
1742 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1743 }
1744
1745 /* IEM has pending work (typically memory write after INS instruction). */
1746 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
1747 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1748
1749 /* IOM has pending work (committing an I/O or MMIO write). */
1750 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
1751 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1752
1753#ifdef VBOX_WITH_RAW_MODE
1754 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1755 CSAMR3DoPendingAction(pVM, pVCpu);
1756#endif
1757
1758 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1759 {
1760 if ( rc > VINF_EM_NO_MEMORY
1761 && rc <= VINF_EM_LAST)
1762 rc = VINF_EM_NO_MEMORY;
1763 }
1764
1765 return rc;
1766}
1767
1768#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1769/**
1770 * Helper for emR3ForcedActions() for injecting interrupts into the
1771 * nested-guest.
1772 *
1773 * @returns VBox status code.
1774 * @param pVCpu The cross context virtual CPU structure.
1775 * @param pCtx Pointer to the nested-guest CPU context.
1776 * @param pfResched Where to store whether a reschedule is required.
1777 * @param pfInject Where to store whether an interrupt was injected (and if
1778 * a wake up is pending).
1779 */
1780static int emR3NstGstInjectIntr(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfResched, bool *pfInject)
1781{
1782 *pfResched = false;
1783 *pfInject = false;
1784 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1785 {
1786 PVM pVM = pVCpu->CTX_SUFF(pVM);
1787 Assert(pCtx->hwvirt.fGif);
1788 bool fVirtualGif = CPUMGetSvmNstGstVGif(pCtx);
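/* Interrupt delivery to the nested-guest is gated by the global interrupt flag (GIF), virtualized here. */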
1789#ifdef VBOX_WITH_RAW_MODE
1790 fVirtualGif &= !PATMIsPatchGCAddr(pVM, pCtx->eip);
1791#endif
1792 if (fVirtualGif)
1793 {
1794 if (CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
1795 {
1796 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1797 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1798 {
1799 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
1800 {
1801 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1802 if (RT_SUCCESS(rcStrict))
1803 {
1804 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1805 * doesn't intercept HLT but intercepts INTR? */
1806 *pfResched = true;
1807 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1808 if (rcStrict == VINF_SVM_VMEXIT)
1809 return VINF_SUCCESS;
1810 return VBOXSTRICTRC_VAL(rcStrict);
1811 }
1812
1813 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1814 return VINF_EM_TRIPLE_FAULT;
1815 }
1816
1817 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1818 /** @todo this really isn't nice, should properly handle this */
1819 int rc = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1820 Assert(rc != VINF_PGM_CHANGE_MODE);
1821 if (rc == VINF_SVM_VMEXIT)
1822 rc = VINF_SUCCESS;
1823 if (pVM->em.s.fIemExecutesAll && ( rc == VINF_EM_RESCHEDULE_REM
1824 || rc == VINF_EM_RESCHEDULE_HM
1825 || rc == VINF_EM_RESCHEDULE_RAW))
1826 {
1827 rc = VINF_EM_RESCHEDULE;
1828 }
1829
1830 *pfResched = true;
1831 *pfInject = true;
1832 return rc;
1833 }
1834 }
1835
1836 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1837 && CPUMCanSvmNstGstTakeVirtIntr(pVCpu, pCtx))
1838 {
1839 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
1840 {
1841 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1842 if (RT_SUCCESS(rcStrict))
1843 {
1844 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1845 * doesn't intercept HLT but intercepts VINTR? */
1846 *pfResched = true;
1847 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1848 if (rcStrict == VINF_SVM_VMEXIT)
1849 return VINF_SUCCESS;
1850 return VBOXSTRICTRC_VAL(rcStrict);
1851 }
1852
1853 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1854 return VINF_EM_TRIPLE_FAULT;
1855 }
1856
1857 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1858 uint8_t const uNstGstVector = CPUMGetSvmNstGstInterrupt(pCtx);
1859 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR vector %#x\n", uNstGstVector));
1860 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1861 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1862
1863 *pfResched = true;
1864 *pfInject = true;
1865 return VINF_EM_RESCHEDULE;
1866 }
1867 }
1868 return VINF_SUCCESS;
1869 }
1870
1871 if (CPUMIsGuestInVmxNestedHwVirtMode(pCtx))
1872 { /** @todo Nested VMX. */ }
1873
1874 /* Shouldn't really get here. */
1875 AssertMsgFailed(("Unrecognized nested hwvirt. arch!\n"));
1876 return VERR_EM_INTERNAL_ERROR;
1877}
1878#endif
1879
1880/**
1881 * Executes all pending forced actions.
1882 *
1883 * Forced actions can cause execution delays and execution
1884 * rescheduling. The first we deal with using action priority, so
1885 * that for instance pending timers aren't scheduled and run until
1886 * right before execution. The rescheduling we deal with using
1887 * return codes. The same goes for VM termination, only in that case
1888 * we exit everything.
1889 *
1890 * @returns VBox status code of equal or greater importance/severity than rc.
1891 * The most important ones are: VINF_EM_RESCHEDULE,
1892 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1893 *
1894 * @param pVM The cross context VM structure.
1895 * @param pVCpu The cross context virtual CPU structure.
1896 * @param rc The current rc.
1897 *
1898 */
1899int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1900{
1901 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1902#ifdef VBOX_STRICT
1903 int rcIrq = VINF_SUCCESS;
1904#endif
1905 int rc2;
1906#define UPDATE_RC() \
1907 do { \
1908 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1909 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1910 break; \
1911 if (!rc || rc2 < rc) \
1912 rc = rc2; \
1913 } while (0)
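/* Note! The VINF_EM_* status codes are ordered so that a numerically lower (but
   non-zero) value has the higher priority; UPDATE_RC() thus keeps the most
   important pending status code. */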
1914 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1915
1916 /*
1917 * Post execution chunk first.
1918 */
1919 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1920 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1921 {
1922 /*
1923 * EMT Rendezvous (must be serviced before termination).
1924 */
1925 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1926 {
1927 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1928 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1929 UPDATE_RC();
1930 /** @todo HACK ALERT! The following test is to make sure EM+TM
1931 * thinks the VM is stopped/reset before the next VM state change
1932 * is made. We need a better solution for this, or at least make it
1933 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1934 * VINF_EM_SUSPEND). */
1935 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1936 {
1937 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1938 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1939 return rc;
1940 }
1941 }
1942
1943 /*
1944 * State change request (cleared by vmR3SetStateLocked).
1945 */
1946 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1947 {
1948 VMSTATE enmState = VMR3GetState(pVM);
1949 switch (enmState)
1950 {
1951 case VMSTATE_FATAL_ERROR:
1952 case VMSTATE_FATAL_ERROR_LS:
1953 case VMSTATE_GURU_MEDITATION:
1954 case VMSTATE_GURU_MEDITATION_LS:
1955 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1956 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1957 return VINF_EM_SUSPEND;
1958
1959 case VMSTATE_DESTROYING:
1960 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1961 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1962 return VINF_EM_TERMINATE;
1963
1964 default:
1965 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1966 }
1967 }
1968
1969 /*
1970 * Debugger Facility polling.
1971 */
1972 if ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
1973 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
1974 {
1975 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1976 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1977 UPDATE_RC();
1978 }
1979
1980 /*
1981 * Postponed reset request.
1982 */
1983 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1984 {
1985 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1986 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1987 UPDATE_RC();
1988 }
1989
1990#ifdef VBOX_WITH_RAW_MODE
1991 /*
1992 * CSAM page scanning.
1993 */
1994 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1995 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1996 {
1997 /** @todo check for 16 or 32 bits code! (D bit in the code selector) */
1998 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1999 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2000 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2001 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
2002 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
2003 }
2004#endif
2005
2006 /*
2007 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
2008 */
2009 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2010 {
2011 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2012 UPDATE_RC();
2013 if (rc == VINF_EM_NO_MEMORY)
2014 return rc;
2015 }
2016
2017 /* check that we got them all */
2018 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2019 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
2020 }
2021
2022 /*
2023 * Normal priority then.
2024 * (Executed in no particular order.)
2025 */
2026 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
2027 {
2028 /*
2029 * PDM Queues are pending.
2030 */
2031 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
2032 PDMR3QueueFlushAll(pVM);
2033
2034 /*
2035 * PDM DMA transfers are pending.
2036 */
2037 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
2038 PDMR3DmaRun(pVM);
2039
2040 /*
2041 * EMT Rendezvous (make sure they are handled before the requests).
2042 */
2043 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2044 {
2045 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2046 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2047 UPDATE_RC();
2048 /** @todo HACK ALERT! The following test is to make sure EM+TM
2049 * thinks the VM is stopped/reset before the next VM state change
2050 * is made. We need a better solution for this, or at least make it
2051 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2052 * VINF_EM_SUSPEND). */
2053 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2054 {
2055 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2056 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2057 return rc;
2058 }
2059 }
2060
2061 /*
2062 * Requests from other threads.
2063 */
2064 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
2065 {
2066 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2067 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
2068 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
2069 {
2070 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2071 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2072 return rc2;
2073 }
2074 UPDATE_RC();
2075 /** @todo HACK ALERT! The following test is to make sure EM+TM
2076 * thinks the VM is stopped/reset before the next VM state change
2077 * is made. We need a better solution for this, or at least make it
2078 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2079 * VINF_EM_SUSPEND). */
2080 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2081 {
2082 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2083 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2084 return rc;
2085 }
2086 }
2087
2088#ifdef VBOX_WITH_REM
2089 /* Replay the handler notification changes. */
2090 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
2091 {
2092 /* Try not to cause deadlocks. */
2093 if ( pVM->cCpus == 1
2094 || ( !PGMIsLockOwner(pVM)
2095 && !IOMIsLockWriteOwner(pVM))
2096 )
2097 {
2098 EMRemLock(pVM);
2099 REMR3ReplayHandlerNotifications(pVM);
2100 EMRemUnlock(pVM);
2101 }
2102 }
2103#endif
2104
2105 /* check that we got them all */
2106 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
2107 }
2108
2109 /*
2110 * Normal priority then. (per-VCPU)
2111 * (Executed in no particular order.)
2112 */
2113 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2114 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
2115 {
2116 /*
2117 * Requests from other threads.
2118 */
2119 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
2120 {
2121 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2122 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
2123 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
2124 {
2125 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2126 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2127 return rc2;
2128 }
2129 UPDATE_RC();
2130 /** @todo HACK ALERT! The following test is to make sure EM+TM
2131 * thinks the VM is stopped/reset before the next VM state change
2132 * is made. We need a better solution for this, or at least make it
2133 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2134 * VINF_EM_SUSPEND). */
2135 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2136 {
2137 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2138 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2139 return rc;
2140 }
2141 }
2142
2143 /* check that we got them all */
2144 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
2145 }
2146
2147 /*
2148 * High priority pre execution chunk last.
2149 * (Executed in ascending priority order.)
2150 */
2151 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
2152 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
2153 {
2154 /*
2155 * Timers before interrupts.
2156 */
2157 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
2158 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2159 TMR3TimerQueuesDo(pVM);
2160
2161 /*
2162 * Pick up asynchronously posted interrupts into the APIC.
2163 */
2164 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2165 APICUpdatePendingInterrupts(pVCpu);
2166
2167 /*
2168 * The instruction following an emulated STI should *always* be executed!
2169 *
2170 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
2171 * the eip is the same as the inhibited instr address. Before we
2172 * are able to execute this instruction in raw mode (iret to
2173 * guest code) an external interrupt might force a world switch
2174 * again, possibly allowing a guest interrupt to be dispatched
2175 * in the process. This could break the guest. Sounds very
2176 * unlikely, but such timing-sensitive problems are not as rare as
2177 * you might think.
2178 */
2179 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2180 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2181 {
2182 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
2183 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
2184 {
2185 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
2186 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2187 }
2188 else
2189 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
2190 }
2191
2192 /*
2193 * Interrupts.
2194 */
2195 bool fWakeupPending = false;
2196 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2197 && (!rc || rc >= VINF_EM_RESCHEDULE_HM))
2198 {
2199 if ( !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2200 && !TRPMHasTrap(pVCpu)) /* an interrupt could already be scheduled for dispatching in the recompiler. */
2201 {
2202 Assert(!HMR3IsEventPending(pVCpu));
2203 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2204#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2205 if (CPUMIsGuestInNestedHwVirtMode(pCtx))
2206 {
2207 bool fResched, fInject;
2208 rc2 = emR3NstGstInjectIntr(pVCpu, pCtx, &fResched, &fInject);
2209 if (fInject)
2210 {
2211 fWakeupPending = true;
2212# ifdef VBOX_STRICT
2213 rcIrq = rc2;
2214# endif
2215 }
2216 if (fResched)
2217 UPDATE_RC();
2218 }
2219 else
2220#endif
2221 {
2222 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
2223 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2224#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2225 && pCtx->hwvirt.fGif
2226#endif
2227#ifdef VBOX_WITH_RAW_MODE
2228 && !PATMIsPatchGCAddr(pVM, pCtx->eip)
2229#endif
2230 && pCtx->eflags.Bits.u1IF)
2231 {
2232 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2233 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
2234 /** @todo this really isn't nice, should properly handle this */
2235 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2236 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
2237 Log(("EM: TRPMR3InjectEvent -> %d\n", rc2));
2238 if (pVM->em.s.fIemExecutesAll && ( rc2 == VINF_EM_RESCHEDULE_REM
2239 || rc2 == VINF_EM_RESCHEDULE_HM
2240 || rc2 == VINF_EM_RESCHEDULE_RAW))
2241 {
2242 rc2 = VINF_EM_RESCHEDULE;
2243 }
2244#ifdef VBOX_STRICT
2245 rcIrq = rc2;
2246#endif
2247 UPDATE_RC();
2248 /* Reschedule required: We must not miss the wakeup below! */
2249 fWakeupPending = true;
2250 }
2251 }
2252 }
2253 }
2254
2255 /*
2256 * Allocate handy pages.
2257 */
2258 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2259 {
2260 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2261 UPDATE_RC();
2262 }
2263
2264 /*
2265 * Debugger Facility request.
2266 */
2267 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2268 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2269 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
2270 {
2271 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2272 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2273 UPDATE_RC();
2274 }
2275
2276 /*
2277 * EMT Rendezvous (must be serviced before termination).
2278 */
2279 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2280 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2281 {
2282 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2283 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2284 UPDATE_RC();
2285 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2286 * stopped/reset before the next VM state change is made. We need a better
2287 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2288 * && rc >= VINF_EM_SUSPEND). */
2289 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2290 {
2291 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2292 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2293 return rc;
2294 }
2295 }
2296
2297 /*
2298 * State change request (cleared by vmR3SetStateLocked).
2299 */
2300 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2301 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2302 {
2303 VMSTATE enmState = VMR3GetState(pVM);
2304 switch (enmState)
2305 {
2306 case VMSTATE_FATAL_ERROR:
2307 case VMSTATE_FATAL_ERROR_LS:
2308 case VMSTATE_GURU_MEDITATION:
2309 case VMSTATE_GURU_MEDITATION_LS:
2310 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2311 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2312 return VINF_EM_SUSPEND;
2313
2314 case VMSTATE_DESTROYING:
2315 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2316 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2317 return VINF_EM_TERMINATE;
2318
2319 default:
2320 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2321 }
2322 }
2323
2324 /*
2325 * Out of memory? Since most of our fellow high priority actions may cause us
2326 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2327 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2328 * than us since we can terminate without allocating more memory.
2329 */
2330 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2331 {
2332 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2333 UPDATE_RC();
2334 if (rc == VINF_EM_NO_MEMORY)
2335 return rc;
2336 }
2337
2338 /*
2339 * If the virtual sync clock is still stopped, make TM restart it.
2340 */
2341 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2342 TMR3VirtualSyncFF(pVM, pVCpu);
2343
2344#ifdef DEBUG
2345 /*
2346 * Debug, pause the VM.
2347 */
2348 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2349 {
2350 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2351 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2352 return VINF_EM_SUSPEND;
2353 }
2354#endif
2355
2356 /* check that we got them all */
2357 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2358 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2359 }
2360
2361#undef UPDATE_RC
2362 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2363 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2364 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2365 return rc;
2366}
2367
2368
2369/**
2370 * Check if the preset execution time cap restricts guest execution scheduling.
2371 *
2372 * @returns true if execution is allowed, false otherwise.
2373 * @param pVM The cross context VM structure.
2374 * @param pVCpu The cross context virtual CPU structure.
2375 */
2376bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2377{
2378 uint64_t u64UserTime, u64KernelTime;
2379
2380 if ( pVM->uCpuExecutionCap != 100
2381 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2382 {
2383 uint64_t u64TimeNow = RTTimeMilliTS();
2384 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2385 {
2386 /* New time slice. */
2387 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2388 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2389 pVCpu->em.s.u64TimeSliceExec = 0;
2390 }
2391 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
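/* Example: with uCpuExecutionCap=50 and the (assumed) 100 ms EM_TIME_SLICE,
   execution is denied once (100 * 50) / 100 = 50 ms of kernel+user CPU time
   has been consumed in the current slice. */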
2392
2393 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2394 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2395 return false;
2396 }
2397 return true;
2398}
2399
2400
2401/**
2402 * Execute VM.
2403 *
2404 * This function is the main loop of the VM. The emulation thread
2405 * calls this function when the VM has been successfully constructed
2406 * and we're ready to execute the VM.
2407 *
2408 * Returning from this function means that the VM is turned off or
2409 * suspended (state already saved) and deconstruction is next in line.
2410 *
2411 * All interaction from other threads is done using forced actions
2412 * and signaling of the wait object.
2413 *
2414 * @returns VBox status code, informational status codes may indicate failure.
2415 * @param pVM The cross context VM structure.
2416 * @param pVCpu The cross context virtual CPU structure.
2417 */
2418VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2419{
2420 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2421 pVM,
2422 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2423 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2424 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2425 pVCpu->em.s.fForceRAW));
2426 VM_ASSERT_EMT(pVM);
2427 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2428 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2429 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2430 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2431
2432 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2433 if (rc == 0)
2434 {
2435 /*
2436 * Start the virtual time.
2437 */
2438 TMR3NotifyResume(pVM, pVCpu);
2439
2440 /*
2441 * The Outer Main Loop.
2442 */
2443 bool fFFDone = false;
2444
2445 /* Reschedule right away to start in the right state. */
2446 rc = VINF_SUCCESS;
2447
2448 /* If resuming after a pause or a state load, restore the previous
2449 state (halted / wait-for-SIPI) so we don't start executing code. Otherwise, just reschedule. */
2450 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2451 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2452 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2453 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2454 else
2455 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2456 pVCpu->em.s.cIemThenRemInstructions = 0;
2457 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2458
2459 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2460 for (;;)
2461 {
2462 /*
2463 * Before we can schedule anything (we're here because
2464 * scheduling is required) we must service any pending
2465 * forced actions to avoid any pending action causing
2466 * immediate rescheduling upon entering an inner loop
2467 *
2468 * Do forced actions.
2469 */
2470 if ( !fFFDone
2471 && RT_SUCCESS(rc)
2472 && rc != VINF_EM_TERMINATE
2473 && rc != VINF_EM_OFF
2474 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2475 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2476 {
2477 rc = emR3ForcedActions(pVM, pVCpu, rc);
2478 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2479 if ( ( rc == VINF_EM_RESCHEDULE_REM
2480 || rc == VINF_EM_RESCHEDULE_HM)
2481 && pVCpu->em.s.fForceRAW)
2482 rc = VINF_EM_RESCHEDULE_RAW;
2483 }
2484 else if (fFFDone)
2485 fFFDone = false;
2486
2487 /*
2488 * Now what to do?
2489 */
2490 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2491 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2492 switch (rc)
2493 {
2494 /*
2495 * Keep doing what we're currently doing.
2496 */
2497 case VINF_SUCCESS:
2498 break;
2499
2500 /*
2501 * Reschedule - to raw-mode execution.
2502 */
2503/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2504 case VINF_EM_RESCHEDULE_RAW:
2505 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2506 if (VM_IS_RAW_MODE_ENABLED(pVM))
2507 {
2508 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2509 pVCpu->em.s.enmState = EMSTATE_RAW;
2510 }
2511 else
2512 {
2513 AssertLogRelFailed();
2514 pVCpu->em.s.enmState = EMSTATE_NONE;
2515 }
2516 break;
2517
2518 /*
2519 * Reschedule - to HM or NEM.
2520 */
2521 case VINF_EM_RESCHEDULE_HM:
2522 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2523 Assert(!pVCpu->em.s.fForceRAW);
2524 if (VM_IS_HM_ENABLED(pVM))
2525 {
2526 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2527 pVCpu->em.s.enmState = EMSTATE_HM;
2528 }
2529 else if (VM_IS_NEM_ENABLED(pVM))
2530 {
2531 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2532 pVCpu->em.s.enmState = EMSTATE_NEM;
2533 }
2534 else
2535 {
2536 AssertLogRelFailed();
2537 pVCpu->em.s.enmState = EMSTATE_NONE;
2538 }
2539 break;
2540
2541 /*
2542 * Reschedule - to recompiled execution.
2543 */
2544 case VINF_EM_RESCHEDULE_REM:
2545 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2546 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2547 {
2548 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2549 enmOldState, EMSTATE_IEM_THEN_REM));
2550 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2551 {
2552 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2553 pVCpu->em.s.cIemThenRemInstructions = 0;
2554 }
2555 }
2556 else
2557 {
2558 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2559 pVCpu->em.s.enmState = EMSTATE_REM;
2560 }
2561 break;
2562
2563 /*
2564 * Resume.
2565 */
2566 case VINF_EM_RESUME:
2567 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2568 /* Don't reschedule in the halted or wait for SIPI case. */
2569 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2570 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2571 {
2572 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2573 break;
2574 }
2575 /* fall through and get scheduled. */
2576 RT_FALL_THRU();
2577
2578 /*
2579 * Reschedule.
2580 */
2581 case VINF_EM_RESCHEDULE:
2582 {
2583 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2584 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2585 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2586 pVCpu->em.s.cIemThenRemInstructions = 0;
2587 pVCpu->em.s.enmState = enmState;
2588 break;
2589 }
2590
2591 /*
2592 * Halted.
2593 */
2594 case VINF_EM_HALT:
2595 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2596 pVCpu->em.s.enmState = EMSTATE_HALTED;
2597 break;
2598
2599 /*
2600 * Switch to the wait for SIPI state (application processor only).
2601 */
2602 case VINF_EM_WAIT_SIPI:
2603 Assert(pVCpu->idCpu != 0);
2604 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2605 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2606 break;
2607
2608
2609 /*
2610 * Suspend.
2611 */
2612 case VINF_EM_SUSPEND:
2613 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2614 Assert(enmOldState != EMSTATE_SUSPENDED);
2615 pVCpu->em.s.enmPrevState = enmOldState;
2616 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2617 break;
2618
2619 /*
2620 * Reset.
2621 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2622 */
2623 case VINF_EM_RESET:
2624 {
2625 if (pVCpu->idCpu == 0)
2626 {
2627 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2628 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2629 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2630 pVCpu->em.s.cIemThenRemInstructions = 0;
2631 pVCpu->em.s.enmState = enmState;
2632 }
2633 else
2634 {
2635 /* All other VCPUs go into the wait for SIPI state. */
2636 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2637 }
2638 break;
2639 }
2640
2641 /*
2642 * Power Off.
2643 */
2644 case VINF_EM_OFF:
2645 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2646 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2647 TMR3NotifySuspend(pVM, pVCpu);
2648 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2649 return rc;
2650
2651 /*
2652 * Terminate the VM.
2653 */
2654 case VINF_EM_TERMINATE:
2655 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2656 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2657 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2658 TMR3NotifySuspend(pVM, pVCpu);
2659 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2660 return rc;
2661
2662
2663 /*
2664 * Out of memory, suspend the VM and stuff.
2665 */
2666 case VINF_EM_NO_MEMORY:
2667 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2668 Assert(enmOldState != EMSTATE_SUSPENDED);
2669 pVCpu->em.s.enmPrevState = enmOldState;
2670 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2671 TMR3NotifySuspend(pVM, pVCpu);
2672 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2673
2674 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2675 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2676 if (rc != VINF_EM_SUSPEND)
2677 {
2678 if (RT_SUCCESS_NP(rc))
2679 {
2680 AssertLogRelMsgFailed(("%Rrc\n", rc));
2681 rc = VERR_EM_INTERNAL_ERROR;
2682 }
2683 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2684 }
2685 return rc;
2686
2687 /*
2688 * Guest debug events.
2689 */
2690 case VINF_EM_DBG_STEPPED:
2691 case VINF_EM_DBG_STOP:
2692 case VINF_EM_DBG_EVENT:
2693 case VINF_EM_DBG_BREAKPOINT:
2694 case VINF_EM_DBG_STEP:
2695 if (enmOldState == EMSTATE_RAW)
2696 {
2697 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2698 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2699 }
2700 else if (enmOldState == EMSTATE_HM)
2701 {
2702 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2703 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2704 }
2705 else if (enmOldState == EMSTATE_NEM)
2706 {
2707 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2708 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2709 }
2710 else if (enmOldState == EMSTATE_REM)
2711 {
2712 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2713 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2714 }
2715 else
2716 {
2717 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2718 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2719 }
2720 break;
2721
2722 /*
2723 * Hypervisor debug events.
2724 */
2725 case VINF_EM_DBG_HYPER_STEPPED:
2726 case VINF_EM_DBG_HYPER_BREAKPOINT:
2727 case VINF_EM_DBG_HYPER_ASSERTION:
2728 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2729 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2730 break;
2731
2732 /*
2733 * Triple fault.
2734 */
2735 case VINF_EM_TRIPLE_FAULT:
2736 if (!pVM->em.s.fGuruOnTripleFault)
2737 {
2738 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2739 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2740 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2741 continue;
2742 }
2743 /* Else fall through and trigger a guru. */
2744 RT_FALL_THRU();
2745
2746 case VERR_VMM_RING0_ASSERTION:
2747 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2748 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2749 break;
2750
2751 /*
2752 * Any error code showing up here other than the ones we
2753 * know and process above is considered to be FATAL.
2754 *
2755 * Unknown warnings and informational status codes are also
2756 * included in this.
2757 */
2758 default:
2759 if (RT_SUCCESS_NP(rc))
2760 {
2761 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2762 rc = VERR_EM_INTERNAL_ERROR;
2763 }
2764 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2765 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2766 break;
2767 }

            /*
             * Act on state transition.
             */
            EMSTATE const enmNewState = pVCpu->em.s.enmState;
            if (enmOldState != enmNewState)
            {
                VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);

                /* Clear MWait flags and the unhalt FF. */
                if (   enmOldState == EMSTATE_HALTED
                    && (   (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
                        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
                    && (   enmNewState == EMSTATE_RAW
                        || enmNewState == EMSTATE_HM
                        || enmNewState == EMSTATE_NEM
                        || enmNewState == EMSTATE_REM
                        || enmNewState == EMSTATE_IEM_THEN_REM
                        || enmNewState == EMSTATE_DEBUG_GUEST_RAW
                        || enmNewState == EMSTATE_DEBUG_GUEST_HM
                        || enmNewState == EMSTATE_DEBUG_GUEST_NEM
                        || enmNewState == EMSTATE_DEBUG_GUEST_IEM
                        || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
                {
                    if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
                    {
                        LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
                        pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
                    }
                    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
                    {
                        LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
                        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
                    }
                }
            }
            else
                VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);

            STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
            STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);

            /*
             * Act on the new state.
             */
            switch (enmNewState)
            {
                /*
                 * Execute raw.
                 */
                case EMSTATE_RAW:
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
#else
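                    /* Raw-mode is not included in this build; getting here is a bug. */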
                    AssertLogRelMsgFailed(("%Rrc\n", rc));
                    rc = VERR_EM_INTERNAL_ERROR;
#endif
                    break;

                /*
                 * Execute hardware accelerated raw.
                 */
                case EMSTATE_HM:
                    rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
                    break;

                /*
                 * Execute hardware accelerated code using the native hypervisor API (NEM).
                 */
                case EMSTATE_NEM:
                    rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
                    break;

                /*
                 * Execute recompiled.
                 */
                case EMSTATE_REM:
                    rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
                    Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
                    break;

                /*
                 * Execute in the interpreter.
                 */
                case EMSTATE_IEM:
                {
#if 0 /* For testing purposes. */
                    STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
                    rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
                    STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
                    if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
                        rc = VINF_SUCCESS;
                    else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
#endif
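                    /* Let IEM interpret a batch of instructions; any status it cannot
                       handle internally is returned for the outer loop to act on. */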
                    rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
                    if (pVM->em.s.fIemExecutesAll)
                    {
                        Assert(rc != VINF_EM_RESCHEDULE_REM);
                        Assert(rc != VINF_EM_RESCHEDULE_RAW);
                        Assert(rc != VINF_EM_RESCHEDULE_HM);
                    }
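                    /* Leave fFFDone clear so the outer loop takes care of any pending
                       forced actions. */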
                    fFFDone = false;
                    break;
                }

                /*
                 * Execute in IEM, hoping we can quickly switch back to HM
                 * or RAW execution. If our hopes fail, we go to REM.
                 */
                case EMSTATE_IEM_THEN_REM:
                {
                    STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
                    rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
                    STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
                    break;
                }

                /*
                 * Application processor execution halted until SIPI.
                 */
                case EMSTATE_WAIT_SIPI:
                    /* no break */
                /*
                 * hlt - execution halted until interrupt.
                 */
                case EMSTATE_HALTED:
                {
                    STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
                    /* If HM (or someone else) stored a pending interrupt in
                       TRPM, it must be dispatched ASAP without any halting.
                       Anything pending in TRPM has been accepted and the CPU
                       should already be in the right state to receive it. */
                    if (TRPMHasTrap(pVCpu))
                        rc = VINF_EM_RESCHEDULE;
                    /* MWAIT has a special extension where it's woken up when
                       an interrupt is pending even when IF=0. */
                    else if (   (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
                             == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
                    {
                        rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
                        if (rc == VINF_SUCCESS)
                        {
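                            /* Fold pending APIC updates into the interrupt force-action
                               flags before checking whether to break the MWAIT. */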
                            if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
                                APICUpdatePendingInterrupts(pVCpu);

                            if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
                                                           | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
                            {
                                Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
                                rc = VINF_EM_RESCHEDULE;
                            }
                        }
                    }
                    else
                    {
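                        /* Plain HLT: normal interrupts only wake us up when IF is set;
                           NMIs, SMIs and unhalt requests always do. */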
                        rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
                        /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
                           check VMCPU_FF_UPDATE_APIC here. */
                        if (   rc == VINF_SUCCESS
                            && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
                        {
                            Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
                            rc = VINF_EM_RESCHEDULE;
                        }
                    }

                    STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
                    break;
                }

                /*
                 * Suspended - return to VM.cpp.
                 */
                case EMSTATE_SUSPENDED:
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return VINF_EM_SUSPEND;

                /*
                 * Debugging in the guest.
                 */
                case EMSTATE_DEBUG_GUEST_RAW:
                case EMSTATE_DEBUG_GUEST_HM:
                case EMSTATE_DEBUG_GUEST_NEM:
                case EMSTATE_DEBUG_GUEST_IEM:
                case EMSTATE_DEBUG_GUEST_REM:
                    TMR3NotifySuspend(pVM, pVCpu);
                    rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
                    TMR3NotifyResume(pVM, pVCpu);
                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
                    break;

                /*
                 * Debugging in the hypervisor.
                 */
                case EMSTATE_DEBUG_HYPER:
                {
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);

                    rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
                    if (rc != VINF_SUCCESS)
                    {
                        if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
                            pVCpu->em.s.enmState = EMSTATE_TERMINATING;
                        else
                        {
                            /* Switch to guru meditation mode. */
                            pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
                            VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
                            VMMR3FatalDump(pVM, pVCpu, rc);
                        }
                        Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                        return rc;
                    }

                    STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
                    TMR3NotifyResume(pVM, pVCpu);
                    break;
                }

                /*
                 * Guru meditation takes place in the debugger.
                 */
                case EMSTATE_GURU_MEDITATION:
                {
                    TMR3NotifySuspend(pVM, pVCpu);
                    VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
                    VMMR3FatalDump(pVM, pVCpu, rc);
                    emR3Debug(pVM, pVCpu, rc);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return rc;
                }

                /*
                 * The states we don't expect here.
                 */
                case EMSTATE_NONE:
                case EMSTATE_TERMINATING:
                default:
                    AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
                    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return VERR_EM_INTERNAL_ERROR;
            }
        } /* The Outer Main Loop */
    }
    else
    {
        /*
         * Fatal error.
         */
        Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
        TMR3NotifySuspend(pVM, pVCpu);
        VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
        VMMR3FatalDump(pVM, pVCpu, rc);
        emR3Debug(pVM, pVCpu, rc);
        STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
        /** @todo change the VM state! */
        return rc;
    }

    /* not reached */
}

/**
 * Notify EM of a state change (used by FTM).
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);

    TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
    pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
    pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
    return VINF_SUCCESS;
}

/**
 * Notify EM of a state change (used by FTM).
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
{
    PVMCPU  pVCpu = VMMGetCpu(pVM);
    EMSTATE enmCurState = pVCpu->em.s.enmState;

    TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
    pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
    pVCpu->em.s.enmPrevState = enmCurState;
    return VINF_SUCCESS;
}