
source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@57329

Last change on this file since 57329 was 56985, checked in by vboxsync, 9 years ago:

VMM: Log and assertion formatting fixes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 122.2 KB
/* $Id: EM.cpp 56985 2015-07-18 22:11:47Z vboxsync $ */
/** @file
 * EM - Execution Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_em EM - The Execution Monitor / Manager
 *
 * The Execution Monitor/Manager is responsible for running the VM, scheduling
 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
 * Interpreted), and keeping the CPU states in sync. The function
 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
 * emR3RemExecute).
 *
 * The interpreted execution is only used to avoid switching between
 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
 * The interpretation is thus implemented as part of EM.
 *
 * @see grp_em
 */
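
/* Added commentary (not part of the original file): a rough sketch of how the
 * pieces named above fit together. The outer loop in EMR3ExecuteVM()
 * dispatches on the per-VCPU EM state and runs one of the inner loops until a
 * status code or pending forced action requires picking a new state; the
 * local names below are hypothetical:
 *
 *     for (;;)
 *     {
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_RAW: rc = emR3RawExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_HM:  rc = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
 *             case EMSTATE_REM: rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             // ... halted, suspended, debug and guru states ...
 *         }
 *         // process rc and any pending forced actions, then choose the next state
 *     }
 */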

/*******************************************************************************
*   Header Files                                                              *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#include <VBox/vmm/em.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmqueue.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/patm.h>
#include "EMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include "VMMTracing.h"

#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/stream.h>
#include <iprt/thread.h>


/*******************************************************************************
*   Defined Constants And Macros                                              *
*******************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
#define EM_NOTIFY_HM
#endif


/*******************************************************************************
*   Internal Functions                                                        *
*******************************************************************************/
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
static const char *emR3GetStateName(EMSTATE enmState);
#endif
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);


/**
 * Initializes the EM.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
{
    LogFlow(("EMR3Init\n"));
    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, em.s, 32);
    AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
    AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));

    /*
     * Init the structure.
     */
    pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
    PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
    PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");

    bool fEnabled;
    int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileUser = !fEnabled;

    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileSupervisor = !fEnabled;

#ifdef VBOX_WITH_RAW_RING1
    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
    AssertLogRelRCReturn(rc, rc);
#else
    pVM->fRawRing1Enabled = false; /* Disabled by default. */
#endif

    rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
    AssertLogRelRCReturn(rc, rc);

    rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
    AssertLogRelRCReturn(rc, rc);
    pVM->em.s.fGuruOnTripleFault = !fEnabled;
    if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
    {
        LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
        pVM->em.s.fGuruOnTripleFault = true;
    }

    Log(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
         pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));

#ifdef VBOX_WITH_REM
    /*
     * Initialize the REM critical section.
     */
    AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
    rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
    AssertRCReturn(rc, rc);
#endif

    /*
     * Saved state.
     */
    rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
                               NULL, NULL, NULL,
                               NULL, emR3Save, NULL,
                               NULL, emR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
        pVCpu->em.s.enmPrevState = EMSTATE_NONE;
        pVCpu->em.s.fForceRAW = false;

        pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
#ifdef VBOX_WITH_RAW_MODE
        if (!HMIsEnabled(pVM))
        {
            pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
            AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
        }
#endif

        /* Force reset of the time slice. */
        pVCpu->em.s.u64TimeSliceStart = 0;

# define EM_REG_COUNTER(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_COUNTER_USED(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE_ADV(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

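        /* Added commentary: each EM_REG_* helper above expands to a
         * STAMR3RegisterF() call; the trailing 'i' argument fills the "%d" in
         * the sample name, so every VCPU gets its own statistics subtree,
         * e.g. "/EM/CPU0/RZ/Interpret" vs "/EM/CPU1/RZ/Interpret". */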
        /*
         * Statistics.
         */
#ifdef VBOX_WITH_STATISTICS
        PEMSTATS pStats;
        rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->em.s.pStatsR3 = pStats;
        pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
        pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);

        EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
        EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");

        EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
        EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");

        EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
        EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of an instruction prefix.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of an instruction prefix.");

        EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
        EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
        EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");

        EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
        pVCpu->em.s.pCliStatTree = 0;

        /* these should be considered for release statistics. */
        EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
        EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHmEntry, "/PROF/CPU%d/EM/HmEnter", "Profiling Hardware Accelerated Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHmExec, "/PROF/CPU%d/EM/HmExec", "Profiling Hardware Accelerated Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");

#endif /* VBOX_WITH_STATISTICS */

        EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
        EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
        EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");

        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
    }

    emR3InitDbg(pVM);
    return VINF_SUCCESS;
}


/**
 * Applies relocations to data and code managed by this component.
 *
 * This function will be called at init and whenever the VMM needs to
 * relocate itself inside the GC.
 *
 * @param   pVM     Pointer to the VM.
 */
VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
{
    LogFlow(("EMR3Relocate\n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        if (pVCpu->em.s.pStatsR3)
            pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
    }
}


/**
 * Reset the EM state for a CPU.
 *
 * Called by EMR3Reset and hot plugging.
 *
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
{
    pVCpu->em.s.fForceRAW = false;

    /* VMR3Reset may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
       out of the HALTED state here so that enmPrevState doesn't end up as
       HALTED when EMR3ExecuteVM returns. */
    if (pVCpu->em.s.enmState == EMSTATE_HALTED)
    {
        Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
        pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
    }
}


/**
 * Reset notification.
 *
 * @param   pVM     Pointer to the VM.
 */
VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
{
    Log(("EMR3Reset: \n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        EMR3ResetCpu(&pVM->aCpus[i]);
}


/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources; the VM itself is
 * at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
{
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

#ifdef VBOX_WITH_REM
    PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
#endif
    return VINF_SUCCESS;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
        AssertRCReturn(rc, rc);

        Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
        Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
        AssertRCReturn(rc, rc);

        /* Save mwait state. */
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}
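
/* Added commentary: per VCPU the "em" saved-state unit stores, in order, one
 * bool (fForceRAW), one uint32_t (enmPrevState), one uint32_t (MWait.fWait)
 * and five guest-context pointers for the MONITOR/MWAIT operands; emR3Load
 * below accepts older unit versions that lack the trailing groups. */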


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    /*
     * Validate version.
     */
    if (   uVersion > EM_SAVED_STATE_VERSION
        || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
    {
        AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Load the saved state.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;
        AssertRCReturn(rc, rc);

        if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
        {
            AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
            rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
            AssertRCReturn(rc, rc);
            Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);

            pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
        }
        if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
        {
            /* Load mwait state. */
            rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
            AssertRCReturn(rc, rc);
        }

        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return VINF_SUCCESS;
}


/**
 * Argument packet for emR3SetExecutionPolicy.
 */
struct EMR3SETEXECPOLICYARGS
{
    EMEXECPOLICY enmPolicy;
    bool         fEnforce;
};


/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
 */
static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /*
     * Only the first CPU changes the variables.
     */
    if (pVCpu->idCpu == 0)
    {
        struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
        switch (pArgs->enmPolicy)
        {
            case EMEXECPOLICY_RECOMPILE_RING0:
                pVM->fRecompileSupervisor = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_RECOMPILE_RING3:
                pVM->fRecompileUser = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_IEM_ALL:
                pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
                break;
            default:
                AssertFailedReturn(VERR_INVALID_PARAMETER);
        }
        Log(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
             pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
    }

    /*
     * Force rescheduling if in RAW, HM, IEM, or REM.
     */
    return pVCpu->em.s.enmState == EMSTATE_RAW
        || pVCpu->em.s.enmState == EMSTATE_HM
        || pVCpu->em.s.enmState == EMSTATE_IEM
        || pVCpu->em.s.enmState == EMSTATE_REM
        || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
         ? VINF_EM_RESCHEDULE
         : VINF_SUCCESS;
}


/**
 * Changes an execution scheduling policy parameter.
 *
 * This is used to enable or disable raw-mode / hardware-virtualization
 * execution of user and supervisor code.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to change.
 * @param   fEnforce    Whether to enforce the policy or not.
 */
VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);

    struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
    return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
}
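
/* Illustrative usage sketch (added commentary, not part of the original file;
 * the caller shown here is hypothetical): forcing all guest code through IEM,
 * e.g. while hunting an emulation bug:
 *
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *     AssertLogRelRC(rc);
 *
 * Any EMT currently executing in RAW/HM/IEM/REM mode gets VINF_EM_RESCHEDULE
 * back from the rendezvous above, so the new policy takes effect at the next
 * scheduling decision. */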


/**
 * Queries an execution scheduling policy parameter.
 *
 * @returns VBox status code
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to query.
 * @param   pfEnforced  Where to return the current value.
 */
VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
{
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /* No need to bother EMTs with a query. */
    switch (enmPolicy)
    {
        case EMEXECPOLICY_RECOMPILE_RING0:
            *pfEnforced = pVM->fRecompileSupervisor;
            break;
        case EMEXECPOLICY_RECOMPILE_RING3:
            *pfEnforced = pVM->fRecompileUser;
            break;
        case EMEXECPOLICY_IEM_ALL:
            *pfEnforced = pVM->em.s.fIemExecutesAll;
            break;
        default:
            AssertFailedReturn(VERR_INTERNAL_ERROR_2);
    }

    return VINF_SUCCESS;
}
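
/* Illustrative counterpart (added commentary; hypothetical caller): reading a
 * policy back is cheap and needs no rendezvous:
 *
 *     bool fIemAll = false;
 *     int rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fIemAll);
 */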


/**
 * Raise a fatal error.
 *
 * Safely terminate the VM with full state report and stuff. This function
 * will naturally never return.
 *
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   rc      VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
    AssertReleaseMsgFailed(("longjmp returned!\n"));
}


#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
/**
 * Gets the EM state name.
 *
 * @returns Pointer to a read-only state name.
 * @param   enmState    The state.
 */
static const char *emR3GetStateName(EMSTATE enmState)
{
    switch (enmState)
    {
        case EMSTATE_NONE:              return "EMSTATE_NONE";
        case EMSTATE_RAW:               return "EMSTATE_RAW";
        case EMSTATE_HM:                return "EMSTATE_HM";
        case EMSTATE_IEM:               return "EMSTATE_IEM";
        case EMSTATE_REM:               return "EMSTATE_REM";
        case EMSTATE_HALTED:            return "EMSTATE_HALTED";
        case EMSTATE_WAIT_SIPI:         return "EMSTATE_WAIT_SIPI";
        case EMSTATE_SUSPENDED:         return "EMSTATE_SUSPENDED";
        case EMSTATE_TERMINATING:       return "EMSTATE_TERMINATING";
        case EMSTATE_DEBUG_GUEST_RAW:   return "EMSTATE_DEBUG_GUEST_RAW";
        case EMSTATE_DEBUG_GUEST_HM:    return "EMSTATE_DEBUG_GUEST_HM";
        case EMSTATE_DEBUG_GUEST_IEM:   return "EMSTATE_DEBUG_GUEST_IEM";
        case EMSTATE_DEBUG_GUEST_REM:   return "EMSTATE_DEBUG_GUEST_REM";
        case EMSTATE_DEBUG_HYPER:       return "EMSTATE_DEBUG_HYPER";
        case EMSTATE_GURU_MEDITATION:   return "EMSTATE_GURU_MEDITATION";
        case EMSTATE_IEM_THEN_REM:      return "EMSTATE_IEM_THEN_REM";
        default:                        return "Unknown!";
    }
}
#endif /* LOG_ENABLED || VBOX_STRICT */


/**
 * Debug loop.
 *
 * @returns VBox status code for EM.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   rc      Current EM VBox status code.
 */
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
        const VBOXSTRICTRC rcLast = rc;

        /*
         * Debug related RC.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (   pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    || pVCpu->em.s.fForceRAW /* paranoia */)
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawStep(pVM, pVCpu);
#else
                    AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
#endif
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
                    rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
#ifdef VBOX_WITH_REM
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
                    rc = emR3RemStep(pVM, pVCpu);
#endif
                else
                {
                    rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
                    if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
                        rc = VINF_EM_DBG_STEPPED;
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                RTLogFlush(NULL);
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special errors code! */
            {
                LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
            }
        }

        /*
         * Process the result.
         */
        do
        {
            switch (VBOXSTRICTRC_VAL(rc))
            {
                /*
                 * Continue the debugging loop.
                 */
                case VINF_EM_DBG_STEP:
                case VINF_EM_DBG_STOP:
                case VINF_EM_DBG_STEPPED:
                case VINF_EM_DBG_BREAKPOINT:
                case VINF_EM_DBG_HYPER_STEPPED:
                case VINF_EM_DBG_HYPER_BREAKPOINT:
                case VINF_EM_DBG_HYPER_ASSERTION:
                    break;

                /*
                 * Resuming execution (in some form) has to be done here if we got
                 * a hypervisor debug event.
                 */
                case VINF_SUCCESS:
                case VINF_EM_RESUME:
                case VINF_EM_SUSPEND:
                case VINF_EM_RESCHEDULE:
                case VINF_EM_RESCHEDULE_RAW:
                case VINF_EM_RESCHEDULE_REM:
                case VINF_EM_HALT:
                    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                    {
#ifdef VBOX_WITH_RAW_MODE
                        rc = emR3RawResumeHyper(pVM, pVCpu);
                        if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                            continue;
#else
                        AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
#endif
                    }
                    if (rc == VINF_SUCCESS)
                        rc = VINF_EM_RESCHEDULE;
                    return rc;

                /*
                 * The debugger isn't attached.
                 * We'll simply turn the thing off since that's the easiest thing to do.
                 */
                case VERR_DBGF_NOT_ATTACHED:
                    switch (VBOXSTRICTRC_VAL(rcLast))
                    {
                        case VINF_EM_DBG_HYPER_STEPPED:
                        case VINF_EM_DBG_HYPER_BREAKPOINT:
                        case VINF_EM_DBG_HYPER_ASSERTION:
                        case VERR_TRPM_PANIC:
                        case VERR_TRPM_DONT_PANIC:
                        case VERR_VMM_RING0_ASSERTION:
                        case VERR_VMM_HYPER_CR3_MISMATCH:
                        case VERR_VMM_RING3_CALL_DISABLED:
                            return rcLast;
                    }
                    return VINF_EM_OFF;

                /*
                 * Status codes terminating the VM in one or another sense.
                 */
                case VINF_EM_TERMINATE:
                case VINF_EM_OFF:
                case VINF_EM_RESET:
                case VINF_EM_NO_MEMORY:
                case VINF_EM_RAW_STALE_SELECTOR:
                case VINF_EM_RAW_IRET_TRAP:
                case VERR_TRPM_PANIC:
                case VERR_TRPM_DONT_PANIC:
                case VERR_IEM_INSTR_NOT_IMPLEMENTED:
                case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
                case VERR_VMM_RING0_ASSERTION:
                case VERR_VMM_HYPER_CR3_MISMATCH:
                case VERR_VMM_RING3_CALL_DISABLED:
                case VERR_INTERNAL_ERROR:
                case VERR_INTERNAL_ERROR_2:
                case VERR_INTERNAL_ERROR_3:
                case VERR_INTERNAL_ERROR_4:
                case VERR_INTERNAL_ERROR_5:
                case VERR_IPE_UNEXPECTED_STATUS:
                case VERR_IPE_UNEXPECTED_INFO_STATUS:
                case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                    return rc;

                /*
                 * The rest is unexpected, and will keep us here.
                 */
                default:
                    AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
                    break;
            }
        } while (false);
    } /* debug for ever */
}


/**
 * Steps recompiled code.
 *
 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
 *          VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
    Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

#ifdef VBOX_WITH_REM
    EMRemLock(pVM);

    /*
     * Switch to REM, step instruction, switch back.
     */
    int rc = REMR3State(pVM, pVCpu);
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);
        REMR3StateBack(pVM, pVCpu);
    }
    EMRemUnlock(pVM);

#else
    int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
#endif

    Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
    return rc;
}


/**
 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
 * critical section.
 *
 * @returns false (the new fInREMState value), so the caller can clear its
 *          flag in the same assignment.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
1028DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1029{
1030#ifdef VBOX_WITH_REM
1031 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1032 REMR3StateBack(pVM, pVCpu);
1033 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1034
1035 EMRemUnlock(pVM);
1036#endif
1037 return false;
1038}
1039
1040
1041/**
1042 * Executes recompiled code.
1043 *
1044 * This function contains the recompiler version of the inner
1045 * execution loop (the outer loop being in EMR3ExecuteVM()).
1046 *
1047 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1048 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1049 *
1050 * @param pVM Pointer to the VM.
1051 * @param pVCpu Pointer to the VMCPU.
1052 * @param pfFFDone Where to store an indicator telling whether or not
1053 * FFs were done before returning.
1054 *
1055 */
1056static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1057{
1058#ifdef LOG_ENABLED
1059 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1060 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1061
1062 if (pCtx->eflags.Bits.u1VM)
1063 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
1064 else
1065 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
1066#endif
1067 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1068
1069#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1070 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1071 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1072 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1073#endif
1074
1075 /*
1076 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1077 * or the REM suggests raw-mode execution.
1078 */
1079 *pfFFDone = false;
1080#ifdef VBOX_WITH_REM
1081 bool fInREMState = false;
1082#endif
1083 int rc = VINF_SUCCESS;
1084 for (;;)
1085 {
1086#ifdef VBOX_WITH_REM
1087 /*
1088 * Lock REM and update the state if not already in sync.
1089 *
1090 * Note! Big lock, but you are not supposed to own any lock when
1091 * coming in here.
1092 */
1093 if (!fInREMState)
1094 {
1095 EMRemLock(pVM);
1096 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1097
1098 /* Flush the recompiler translation blocks if the VCPU has changed,
1099 also force a full CPU state resync. */
1100 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1101 {
1102 REMFlushTBs(pVM);
1103 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1104 }
1105 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1106
1107 rc = REMR3State(pVM, pVCpu);
1108
1109 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1110 if (RT_FAILURE(rc))
1111 break;
1112 fInREMState = true;
1113
1114 /*
1115 * We might have missed the raising of VMREQ, TIMER and some other
1116 * important FFs while we were busy switching the state. So, check again.
1117 */
1118 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1119 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1120 {
1121 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1122 goto l_REMDoForcedActions;
1123 }
1124 }
1125#endif
1126
1127 /*
1128 * Execute REM.
1129 */
1130 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1131 {
1132 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1133#ifdef VBOX_WITH_REM
1134 rc = REMR3Run(pVM, pVCpu);
1135#else
1136 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu));
1137#endif
1138 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1139 }
1140 else
1141 {
1142 /* Give up this time slice; virtual time continues */
1143 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1144 RTThreadSleep(5);
1145 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1146 rc = VINF_SUCCESS;
1147 }
1148
1149 /*
1150 * Deal with high priority post execution FFs before doing anything
1151 * else. Sync back the state and leave the lock to be on the safe side.
1152 */
1153 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1154 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1155 {
1156#ifdef VBOX_WITH_REM
1157 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1158#endif
1159 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1160 }
1161
1162 /*
1163 * Process the returned status code.
1164 */
1165 if (rc != VINF_SUCCESS)
1166 {
1167 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1168 break;
1169 if (rc != VINF_REM_INTERRUPED_FF)
1170 {
1171 /*
1172 * Anything which is not known to us means an internal error
1173 * and the termination of the VM!
1174 */
1175 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1176 break;
1177 }
1178 }
1179
1180
1181 /*
1182 * Check and execute forced actions.
1183 *
1184 * Sync back the VM state and leave the lock before calling any of
1185 * these; you never know what's going to happen here.
1186 */
1187#ifdef VBOX_HIGH_RES_TIMERS_HACK
1188 TMTimerPollVoid(pVM, pVCpu);
1189#endif
1190 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1191 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1192 || VMCPU_FF_IS_PENDING(pVCpu,
1193 VMCPU_FF_ALL_REM_MASK
1194 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1195 {
1196l_REMDoForcedActions:
1197#ifdef VBOX_WITH_REM
1198 if (fInREMState)
1199 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1200#endif
1201 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1202 rc = emR3ForcedActions(pVM, pVCpu, rc);
1203 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1204 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1205 if ( rc != VINF_SUCCESS
1206 && rc != VINF_EM_RESCHEDULE_REM)
1207 {
1208 *pfFFDone = true;
1209 break;
1210 }
1211 }
1212
1213 } /* The Inner Loop, recompiled execution mode version. */
1214
1215
1216#ifdef VBOX_WITH_REM
1217 /*
1218 * Returning. Sync back the VM state if required.
1219 */
1220 if (fInREMState)
1221 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1222#endif
1223
1224 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1225 return rc;
1226}
1227
1228
1229#ifdef DEBUG
1230
1231int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1232{
1233 EMSTATE enmOldState = pVCpu->em.s.enmState;
1234
1235 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1236
1237 Log(("Single step BEGIN:\n"));
1238 for (uint32_t i = 0; i < cIterations; i++)
1239 {
1240 DBGFR3PrgStep(pVCpu);
1241 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1242 emR3RemStep(pVM, pVCpu);
1243 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1244 break;
1245 }
1246 Log(("Single step END:\n"));
1247 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1248 pVCpu->em.s.enmState = enmOldState;
1249 return VINF_EM_RESCHEDULE;
1250}
1251
1252#endif /* DEBUG */
1253
1254
1255/**
1256 * Try to execute the problematic code in IEM first, then fall back on REM if there
1257 * is too much of it or if IEM doesn't implement something.
1258 *
1259 * @returns Strict VBox status code from IEMExecLots.
1260 * @param pVM The cross context VM structure.
1261 * @param pVCpu The cross context CPU structure for the calling EMT.
1262 * @param pfFFDone Force flags done indicator.
1263 *
1264 * @thread EMT(pVCpu)
1265 */
1266static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1267{
1268 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1269 *pfFFDone = false;
1270
1271 /*
1272 * Execute in IEM for a while.
1273 */
1274 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1275 {
1276 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu);
1277 if (rcStrict != VINF_SUCCESS)
1278 {
1279 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1280 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1281 break;
1282
1283 pVCpu->em.s.cIemThenRemInstructions++;
1284 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1285 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1286 return rcStrict;
1287 }
1288 pVCpu->em.s.cIemThenRemInstructions++;
1289
1290 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1291 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1292 {
1293 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1294 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1295 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1296 pVCpu->em.s.enmState = enmNewState;
1297 return VINF_SUCCESS;
1298 }
1299
1300 /*
1301 * Check for pending actions.
1302 */
1303 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1304 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1305 return VINF_SUCCESS;
1306 }
1307
1308 /*
1309 * Switch to REM.
1310 */
1311 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1312 pVCpu->em.s.enmState = EMSTATE_REM;
1313 return VINF_SUCCESS;
1314}
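
/* An illustrative sketch of the possible outcomes of the function above
 * (a hedged summary, not part of the original source). Note that the budget
 * counter pVCpu->em.s.cIemThenRemInstructions is only reset when the
 * scheduler (re-)enters EMSTATE_IEM_THEN_REM, so the 1024 budget spans calls:
 *
 *     bool fFFDone = false;
 *     VBOXSTRICTRC rcStrict = emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone);
 *     // a) rcStrict != VINF_SUCCESS: an IEM status code for the caller to process.
 *     // b) VINF_SUCCESS, state unchanged: forced actions are pending.
 *     // c) VINF_SUCCESS, new enmState: rescheduled to HM/RAW/etc., or to
 *     //    EMSTATE_REM when the budget ran out or IEM hit something it
 *     //    does not implement.
 */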
1315
1316
1317/**
1318 * Decides whether to execute RAW, HWACC or REM.
1319 *
1320 * @returns new EM state
1321 * @param pVM Pointer to the VM.
1322 * @param pVCpu Pointer to the VMCPU.
1323 * @param pCtx Pointer to the guest CPU context.
1324 */
1325EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1326{
1327 /*
1328 * When forcing raw-mode execution, things are simple.
1329 */
1330 if (pVCpu->em.s.fForceRAW)
1331 return EMSTATE_RAW;
1332
1333 /*
1334 * We stay in the wait for SIPI state unless explicitly told otherwise.
1335 */
1336 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1337 return EMSTATE_WAIT_SIPI;
1338
1339 /*
1340 * Execute everything in IEM?
1341 */
1342 if (pVM->em.s.fIemExecutesAll)
1343 return EMSTATE_IEM;
1344
1345 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1346 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1347 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1348
1349 X86EFLAGS EFlags = pCtx->eflags;
1350 if (HMIsEnabled(pVM))
1351 {
1352 /*
1353 * Hardware accelerated raw-mode:
1354 */
1355 if ( EMIsHwVirtExecutionEnabled(pVM)
1356 && HMR3CanExecuteGuest(pVM, pCtx))
1357 return EMSTATE_HM;
1358
1359 /*
1360 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1361 * turns off monitoring features essential for raw mode!
1362 */
1363 return EMSTATE_IEM_THEN_REM;
1364 }
1365
1366 /*
1367 * Standard raw-mode:
1368 *
1369 * Here we only support 16 & 32 bit protected mode ring-3 code that has no IO privileges,
1370 * or 32 bit protected mode ring-0 code.
1371 *
1372 * The tests are ordered by the likelihood of being true during normal execution.
1373 */
1374 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1375 {
1376 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1377 return EMSTATE_REM;
1378 }
1379
1380# ifndef VBOX_RAW_V86
1381 if (EFlags.u32 & X86_EFL_VM) {
1382 Log2(("raw mode refused: VM_MASK\n"));
1383 return EMSTATE_REM;
1384 }
1385# endif
1386
1387 /** @todo check up on the X86_CR0_AM flag with respect to raw mode!!! We're probably not emulating it right! */
1388 uint32_t u32CR0 = pCtx->cr0;
1389 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1390 {
1391 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1392 return EMSTATE_REM;
1393 }
1394
1395 if (pCtx->cr4 & X86_CR4_PAE)
1396 {
1397 uint32_t u32Dummy, u32Features;
1398
1399 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1400 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1401 return EMSTATE_REM;
1402 }
1403
1404 unsigned uSS = pCtx->ss.Sel;
1405 if ( pCtx->eflags.Bits.u1VM
1406 || (uSS & X86_SEL_RPL) == 3)
1407 {
1408 if (!EMIsRawRing3Enabled(pVM))
1409 return EMSTATE_REM;
1410
1411 if (!(EFlags.u32 & X86_EFL_IF))
1412 {
1413 Log2(("raw mode refused: IF (RawR3)\n"));
1414 return EMSTATE_REM;
1415 }
1416
1417 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1418 {
1419 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1420 return EMSTATE_REM;
1421 }
1422 }
1423 else
1424 {
1425 if (!EMIsRawRing0Enabled(pVM))
1426 return EMSTATE_REM;
1427
1428 if (EMIsRawRing1Enabled(pVM))
1429 {
1430 /* Only ring 0 and 1 supervisor code. */
1431 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1432 {
1433 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1434 return EMSTATE_REM;
1435 }
1436 }
1437 /* Only ring 0 supervisor code. */
1438 else if ((uSS & X86_SEL_RPL) != 0)
1439 {
1440 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1441 return EMSTATE_REM;
1442 }
1443
1444 // Let's start with pure 32-bit ring-0 code first.
1445 /** @todo What's pure 32-bit mode? flat? */
1446 if ( !(pCtx->ss.Attr.n.u1DefBig)
1447 || !(pCtx->cs.Attr.n.u1DefBig))
1448 {
1449 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1450 return EMSTATE_REM;
1451 }
1452
1453 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1454 if (!(u32CR0 & X86_CR0_WP))
1455 {
1456 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1457 return EMSTATE_REM;
1458 }
1459
1460# ifdef VBOX_WITH_RAW_MODE
1461 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1462 {
1463 Log2(("raw r0 mode forced: patch code\n"));
1464# ifdef VBOX_WITH_SAFE_STR
1465 Assert(pCtx->tr.Sel);
1466# endif
1467 return EMSTATE_RAW;
1468 }
1469# endif /* VBOX_WITH_RAW_MODE */
1470
1471# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1472 if (!(EFlags.u32 & X86_EFL_IF))
1473 {
1474 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1475 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1476 return EMSTATE_REM;
1477 }
1478# endif
1479
1480# ifndef VBOX_WITH_RAW_RING1
1481 /** @todo still necessary??? */
1482 if (EFlags.Bits.u2IOPL != 0)
1483 {
1484 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1485 return EMSTATE_REM;
1486 }
1487# endif
1488 }
1489
1490 /*
1491 * Stale hidden selectors mean raw-mode is unsafe (we are being very careful here).
1492 */
1493 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1494 {
1495 Log2(("raw mode refused: stale CS\n"));
1496 return EMSTATE_REM;
1497 }
1498 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1499 {
1500 Log2(("raw mode refused: stale SS\n"));
1501 return EMSTATE_REM;
1502 }
1503 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1504 {
1505 Log2(("raw mode refused: stale DS\n"));
1506 return EMSTATE_REM;
1507 }
1508 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1509 {
1510 Log2(("raw mode refused: stale ES\n"));
1511 return EMSTATE_REM;
1512 }
1513 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1514 {
1515 Log2(("raw mode refused: stale FS\n"));
1516 return EMSTATE_REM;
1517 }
1518 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1519 {
1520 Log2(("raw mode refused: stale GS\n"));
1521 return EMSTATE_REM;
1522 }
1523
1524# ifdef VBOX_WITH_SAFE_STR
1525 if (pCtx->tr.Sel == 0)
1526 {
1527 Log(("Raw mode refused -> TR=0\n"));
1528 return EMSTATE_REM;
1529 }
1530# endif
1531
1532 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1533 return EMSTATE_RAW;
1534}
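
/* A condensed sketch of the decision cascade above -- illustrative only, the
 * function body is authoritative (fCanExecuteGuest is shorthand introduced
 * here for the EMIsHwVirtExecutionEnabled() && HMR3CanExecuteGuest() test):
 *
 *     if (fForceRAW)                        return EMSTATE_RAW;
 *     if (enmState == EMSTATE_WAIT_SIPI)    return EMSTATE_WAIT_SIPI;  // sticky
 *     if (fIemExecutesAll)                  return EMSTATE_IEM;
 *     if (HMIsEnabled(pVM))                 return fCanExecuteGuest
 *                                                ? EMSTATE_HM : EMSTATE_IEM_THEN_REM;
 *     // otherwise: the raw-mode suitability checks (TF/VM flags, CR0.PG+PE,
 *     // PAE support, CPL, CS/SS attributes, CR0.WP, IOPL, stale selectors)
 *     // must all pass for EMSTATE_RAW; any failure falls back to EMSTATE_REM.
 */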
1535
1536
1537/**
1538 * Executes all high priority post execution force actions.
1539 *
1540 * @returns rc or a fatal status code.
1541 *
1542 * @param pVM Pointer to the VM.
1543 * @param pVCpu Pointer to the VMCPU.
1544 * @param rc The current rc.
1545 */
1546int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1547{
1548 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1549
1550 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1551 PDMCritSectBothFF(pVCpu);
1552
1553 /* Update CR3 (Nested Paging case for HM). */
1554 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1555 {
1556 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1557 if (RT_FAILURE(rc2))
1558 return rc2;
1559 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1560 }
1561
1562 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1563 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1564 {
1565 if (CPUMIsGuestInPAEMode(pVCpu))
1566 {
1567 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1568 AssertPtr(pPdpes);
1569
1570 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1571 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1572 }
1573 else
1574 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1575 }
1576
1577 /* IEM has pending work (typically memory write after INS instruction). */
1578 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
1579 rc = VBOXSTRICTRC_TODO(IEMR3DoPendingAction(pVCpu, rc));
1580
1581#ifdef VBOX_WITH_RAW_MODE
1582 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1583 CSAMR3DoPendingAction(pVM, pVCpu);
1584#endif
1585
1586 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1587 {
1588 if ( rc > VINF_EM_NO_MEMORY
1589 && rc <= VINF_EM_LAST)
1590 rc = VINF_EM_NO_MEMORY;
1591 }
1592
1593 return rc;
1594}
1595
1596
1597/**
1598 * Executes all pending forced actions.
1599 *
1600 * Forced actions can cause execution delays and execution
1601 * rescheduling. The first we deal with using action priority, so
1602 * that for instance pending timers aren't scheduled and run until
1603 * right before execution. The rescheduling we deal with using
1604 * return codes. The same goes for VM termination, only in that case
1605 * we exit everything.
1606 *
1607 * @returns VBox status code of equal or greater importance/severity than rc.
1608 * The most important ones are: VINF_EM_RESCHEDULE,
1609 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1610 *
1611 * @param pVM Pointer to the VM.
1612 * @param pVCpu Pointer to the VMCPU.
1613 * @param rc The current rc.
1614 *
1615 */
1616int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1617{
1618 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1619#ifdef VBOX_STRICT
1620 int rcIrq = VINF_SUCCESS;
1621#endif
1622 int rc2;
1623#define UPDATE_RC() \
1624 do { \
1625 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1626 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1627 break; \
1628 if (!rc || rc2 < rc) \
1629 rc = rc2; \
1630 } while (0)
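    /* A worked example of the UPDATE_RC() merge (an illustrative sketch,
     * assuming the conventional ordering in which numerically smaller
     * VINF_EM_* codes are the more important ones):
     *
     *     int rc = VINF_SUCCESS, rc2 = VINF_EM_RESCHEDULE;
     *     UPDATE_RC();    // rc becomes VINF_EM_RESCHEDULE
     *     rc2 = VINF_EM_SUSPEND;
     *     UPDATE_RC();    // VINF_EM_SUSPEND < VINF_EM_RESCHEDULE, so rc becomes VINF_EM_SUSPEND
     *     rc2 = VINF_SUCCESS;
     *     UPDATE_RC();    // a plain success never overrides a pending EM code
     *
     * An error status already in rc (rc < VINF_SUCCESS) is likewise never
     * overwritten by an informational rc2.
     */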
1631 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1632
1633 /*
1634 * Post execution chunk first.
1635 */
1636 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1637 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1638 {
1639 /*
1640 * EMT Rendezvous (must be serviced before termination).
1641 */
1642 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1643 {
1644 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1645 UPDATE_RC();
1646 /** @todo HACK ALERT! The following test is to make sure EM+TM
1647 * thinks the VM is stopped/reset before the next VM state change
1648 * is made. We need a better solution for this, or at least make it
1649 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1650 * VINF_EM_SUSPEND). */
1651 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1652 {
1653 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1654 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1655 return rc;
1656 }
1657 }
1658
1659 /*
1660 * State change request (cleared by vmR3SetStateLocked).
1661 */
1662 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1663 {
1664 VMSTATE enmState = VMR3GetState(pVM);
1665 switch (enmState)
1666 {
1667 case VMSTATE_FATAL_ERROR:
1668 case VMSTATE_FATAL_ERROR_LS:
1669 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1670 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1671 return VINF_EM_SUSPEND;
1672
1673 case VMSTATE_DESTROYING:
1674 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1675 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1676 return VINF_EM_TERMINATE;
1677
1678 default:
1679 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1680 }
1681 }
1682
1683 /*
1684 * Debugger Facility polling.
1685 */
1686 if (VM_FF_IS_PENDING(pVM, VM_FF_DBGF))
1687 {
1688 rc2 = DBGFR3VMMForcedAction(pVM);
1689 UPDATE_RC();
1690 }
1691
1692 /*
1693 * Postponed reset request.
1694 */
1695 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1696 {
1697 rc2 = VMR3Reset(pVM->pUVM);
1698 UPDATE_RC();
1699 }
1700
1701#ifdef VBOX_WITH_RAW_MODE
1702 /*
1703 * CSAM page scanning.
1704 */
1705 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1706 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1707 {
1708 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1709
1710 /** @todo check for 16 or 32 bit code! (D bit in the code selector) */
1711 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1712
1713 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
1714 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1715 }
1716#endif
1717
1718 /*
1719 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1720 */
1721 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1722 {
1723 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1724 UPDATE_RC();
1725 if (rc == VINF_EM_NO_MEMORY)
1726 return rc;
1727 }
1728
1729 /* check that we got them all */
1730 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1731 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0));
1732 }
1733
1734 /*
1735 * Normal priority then.
1736 * (Executed in no particular order.)
1737 */
1738 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1739 {
1740 /*
1741 * PDM Queues are pending.
1742 */
1743 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1744 PDMR3QueueFlushAll(pVM);
1745
1746 /*
1747 * PDM DMA transfers are pending.
1748 */
1749 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1750 PDMR3DmaRun(pVM);
1751
1752 /*
1753 * EMT Rendezvous (make sure they are handled before the requests).
1754 */
1755 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1756 {
1757 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1758 UPDATE_RC();
1759 /** @todo HACK ALERT! The following test is to make sure EM+TM
1760 * thinks the VM is stopped/reset before the next VM state change
1761 * is made. We need a better solution for this, or at least make it
1762 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1763 * VINF_EM_SUSPEND). */
1764 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1765 {
1766 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1767 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1768 return rc;
1769 }
1770 }
1771
1772 /*
1773 * Requests from other threads.
1774 */
1775 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1776 {
1777 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1778 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1779 {
1780 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1781 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1782 return rc2;
1783 }
1784 UPDATE_RC();
1785 /** @todo HACK ALERT! The following test is to make sure EM+TM
1786 * thinks the VM is stopped/reset before the next VM state change
1787 * is made. We need a better solution for this, or at least make it
1788 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1789 * VINF_EM_SUSPEND). */
1790 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1791 {
1792 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1793 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1794 return rc;
1795 }
1796 }
1797
1798#ifdef VBOX_WITH_REM
1799 /* Replay the handler notification changes. */
1800 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1801 {
1802 /* Try not to cause deadlocks. */
1803 if ( pVM->cCpus == 1
1804 || ( !PGMIsLockOwner(pVM)
1805 && !IOMIsLockWriteOwner(pVM))
1806 )
1807 {
1808 EMRemLock(pVM);
1809 REMR3ReplayHandlerNotifications(pVM);
1810 EMRemUnlock(pVM);
1811 }
1812 }
1813#endif
1814
1815 /* check that we got them all */
1816 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1817 }
1818
1819 /*
1820 * Normal priority then. (per-VCPU)
1821 * (Executed in no particular order.)
1822 */
1823 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1824 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1825 {
1826 /*
1827 * Requests from other threads.
1828 */
1829 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
1830 {
1831 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1832 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1833 {
1834 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1835 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1836 return rc2;
1837 }
1838 UPDATE_RC();
1839 /** @todo HACK ALERT! The following test is to make sure EM+TM
1840 * thinks the VM is stopped/reset before the next VM state change
1841 * is made. We need a better solution for this, or at least make it
1842 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1843 * VINF_EM_SUSPEND). */
1844 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1845 {
1846 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1847 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1848 return rc;
1849 }
1850 }
1851
1852 /*
1853 * Forced unhalting of EMT.
1854 */
1855 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
1856 {
1857 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
1858 if (rc == VINF_EM_HALT)
1859 rc = VINF_EM_RESCHEDULE;
1860 else
1861 {
1862 rc2 = VINF_EM_RESCHEDULE;
1863 UPDATE_RC();
1864 }
1865 }
1866
1867 /* check that we got them all */
1868 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST | VMCPU_FF_UNHALT)));
1869 }
1870
1871 /*
1872 * High priority pre execution chunk last.
1873 * (Executed in ascending priority order.)
1874 */
1875 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1876 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1877 {
1878 /*
1879 * Timers before interrupts.
1880 */
1881 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
1882 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1883 TMR3TimerQueuesDo(pVM);
1884
1885 /*
1886 * The instruction following an emulated STI should *always* be executed!
1887 *
1888 * Note! We intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if
1889 * the eip is the same as the inhibited instr address. Before we
1890 * are able to execute this instruction in raw mode (iret to
1891 * guest code) an external interrupt might force a world switch
1892 * again, possibly allowing a guest interrupt to be dispatched
1893 * in the process. This could break the guest. It sounds very
1894 * unlikely, but such timing-sensitive problems are not as rare as
1895 * you might think.
1896 */
1897 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1898 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1899 {
1900 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1901 {
1902 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1903 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1904 }
1905 else
1906 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1907 }
1908
1909 /*
1910 * Interrupts.
1911 */
1912 bool fWakeupPending = false;
1913 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1914 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1915 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1916 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
1917#ifdef VBOX_WITH_RAW_MODE
1918 && PATMAreInterruptsEnabled(pVM)
1919#else
1920 && (pVCpu->em.s.pCtx->eflags.u32 & X86_EFL_IF)
1921#endif
1922 && !HMR3IsEventPending(pVCpu))
1923 {
1924 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1925 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1926 {
1927 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1928 /** @todo this really isn't nice, should properly handle this */
1929 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1930 if (pVM->em.s.fIemExecutesAll && (rc2 == VINF_EM_RESCHEDULE_REM || rc2 == VINF_EM_RESCHEDULE_HM || rc2 == VINF_EM_RESCHEDULE_RAW))
1931 rc2 = VINF_EM_RESCHEDULE;
1932#ifdef VBOX_STRICT
1933 rcIrq = rc2;
1934#endif
1935 UPDATE_RC();
1936 /* Reschedule required: We must not miss the wakeup below! */
1937 fWakeupPending = true;
1938 }
1939#ifdef VBOX_WITH_REM
1940 /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
1941 else if (REMR3QueryPendingInterrupt(pVM, pVCpu) != REM_NO_PENDING_IRQ)
1942 {
1943 Log2(("REMR3QueryPendingInterrupt -> %#x\n", REMR3QueryPendingInterrupt(pVM, pVCpu)));
1944 rc2 = VINF_EM_RESCHEDULE_REM;
1945 UPDATE_RC();
1946 }
1947#endif
1948 }
1949
1950 /*
1951 * Allocate handy pages.
1952 */
1953 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1954 {
1955 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1956 UPDATE_RC();
1957 }
1958
1959 /*
1960 * Debugger Facility request.
1961 */
1962 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_DBGF, VM_FF_PGM_NO_MEMORY))
1963 {
1964 rc2 = DBGFR3VMMForcedAction(pVM);
1965 UPDATE_RC();
1966 }
1967
1968 /*
1969 * EMT Rendezvous (must be serviced before termination).
1970 */
1971 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1972 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1973 {
1974 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1975 UPDATE_RC();
1976 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1977 * stopped/reset before the next VM state change is made. We need a better
1978 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1979 * && rc <= VINF_EM_SUSPEND). */
1980 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1981 {
1982 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1983 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1984 return rc;
1985 }
1986 }
1987
1988 /*
1989 * State change request (cleared by vmR3SetStateLocked).
1990 */
1991 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1992 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1993 {
1994 VMSTATE enmState = VMR3GetState(pVM);
1995 switch (enmState)
1996 {
1997 case VMSTATE_FATAL_ERROR:
1998 case VMSTATE_FATAL_ERROR_LS:
1999 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2000 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2001 return VINF_EM_SUSPEND;
2002
2003 case VMSTATE_DESTROYING:
2004 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2005 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2006 return VINF_EM_TERMINATE;
2007
2008 default:
2009 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2010 }
2011 }
2012
2013 /*
2014 * Out of memory? Since most of our fellow high priority actions may cause us
2015 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2016 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2017 * than us since we can terminate without allocating more memory.
2018 */
2019 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2020 {
2021 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2022 UPDATE_RC();
2023 if (rc == VINF_EM_NO_MEMORY)
2024 return rc;
2025 }
2026
2027 /*
2028 * If the virtual sync clock is still stopped, make TM restart it.
2029 */
2030 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2031 TMR3VirtualSyncFF(pVM, pVCpu);
2032
2033#ifdef DEBUG
2034 /*
2035 * Debug, pause the VM.
2036 */
2037 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2038 {
2039 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2040 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2041 return VINF_EM_SUSPEND;
2042 }
2043#endif
2044
2045 /* check that we got them all */
2046 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2047 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2048 }
2049
2050#undef UPDATE_RC
2051 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2052 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2053 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2054 return rc;
2055}
2056
2057
2058/**
2059 * Check if the preset execution time cap restricts guest execution scheduling.
2060 *
2061 * @returns true if allowed, false otherwise
2062 * @param pVM Pointer to the VM.
2063 * @param pVCpu Pointer to the VMCPU.
2064 */
2065bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2066{
2067 uint64_t u64UserTime, u64KernelTime;
2068
2069 if ( pVM->uCpuExecutionCap != 100
2070 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2071 {
2072 uint64_t u64TimeNow = RTTimeMilliTS();
2073 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2074 {
2075 /* New time slice. */
2076 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2077 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2078 pVCpu->em.s.u64TimeSliceExec = 0;
2079 }
2080 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2081
2082 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2083 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2084 return false;
2085 }
2086 return true;
2087}
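
/* Worked numbers for the check above (illustrative; EM_TIME_SLICE is assumed
 * here to be 100 milliseconds, per its definition in EMInternal.h):
 *
 *     uCpuExecutionCap = 50 (i.e. 50%):
 *     (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100 = (100 * 50) / 100 = 50 ms
 *
 * Once this EMT has accumulated 50 ms of combined kernel+user execution time
 * within the current 100 ms slice, the function returns false and the
 * execution loops back off (e.g. the RTThreadSleep(5) in emR3RemExecute)
 * until a new slice begins.
 */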
2088
2089
2090/**
2091 * Execute VM.
2092 *
2093 * This function is the main loop of the VM. The emulation thread
2094 * calls this function when the VM has been successfully constructed
2095 * and we're ready to execute the VM.
2096 *
2097 * Returning from this function means that the VM is turned off or
2098 * suspended (state already saved) and deconstruction is next in line.
2099 *
2100 * All interaction from other threads is done using forced actions
2101 * and signaling of the wait object.
2102 *
2103 * @returns VBox status code; informational status codes may indicate failure.
2104 * @param pVM Pointer to the VM.
2105 * @param pVCpu Pointer to the VMCPU.
2106 */
2107VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2108{
2109 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2110 pVM,
2111 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2112 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2113 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2114 pVCpu->em.s.fForceRAW));
2115 VM_ASSERT_EMT(pVM);
2116 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2117 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2118 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2119 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2120
2121 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2122 if (rc == 0)
2123 {
2124 /*
2125 * Start the virtual time.
2126 */
2127 TMR3NotifyResume(pVM, pVCpu);
2128
2129 /*
2130 * The Outer Main Loop.
2131 */
2132 bool fFFDone = false;
2133
2134 /* Reschedule right away to start in the right state. */
2135 rc = VINF_SUCCESS;
2136
2137 /* If resuming after a pause or a state load, restore the previous
2138 state or else we'll start executing code. Otherwise, just reschedule. */
2139 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2140 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2141 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2142 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2143 else
2144 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2145 pVCpu->em.s.cIemThenRemInstructions = 0;
2146 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2147
2148 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2149 for (;;)
2150 {
2151 /*
2152 * Before we can schedule anything (we're here because
2153 * scheduling is required) we must service any pending
2154 * forced actions to avoid any pending action causing
2155 * immediate rescheduling upon entering an inner loop.
2156 *
2157 * Do forced actions.
2158 */
2159 if ( !fFFDone
2160 && RT_SUCCESS(rc)
2161 && rc != VINF_EM_TERMINATE
2162 && rc != VINF_EM_OFF
2163 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2164 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK)))
2165 {
2166 rc = emR3ForcedActions(pVM, pVCpu, rc);
2167 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2168 if ( ( rc == VINF_EM_RESCHEDULE_REM
2169 || rc == VINF_EM_RESCHEDULE_HM)
2170 && pVCpu->em.s.fForceRAW)
2171 rc = VINF_EM_RESCHEDULE_RAW;
2172 }
2173 else if (fFFDone)
2174 fFFDone = false;
2175
2176 /*
2177 * Now what to do?
2178 */
2179 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2180 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2181 switch (rc)
2182 {
2183 /*
2184 * Keep doing what we're currently doing.
2185 */
2186 case VINF_SUCCESS:
2187 break;
2188
2189 /*
2190 * Reschedule - to raw-mode execution.
2191 */
2192 case VINF_EM_RESCHEDULE_RAW:
2193 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2194 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2195 pVCpu->em.s.enmState = EMSTATE_RAW;
2196 break;
2197
2198 /*
2199 * Reschedule - to hardware accelerated raw-mode execution.
2200 */
2201 case VINF_EM_RESCHEDULE_HM:
2202 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2203 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2204 Assert(!pVCpu->em.s.fForceRAW);
2205 pVCpu->em.s.enmState = EMSTATE_HM;
2206 break;
2207
2208 /*
2209 * Reschedule - to recompiled execution.
2210 */
2211 case VINF_EM_RESCHEDULE_REM:
2212 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2213 if (HMIsEnabled(pVM))
2214 {
2215 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2216 enmOldState, EMSTATE_IEM_THEN_REM));
2217 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2218 {
2219 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2220 pVCpu->em.s.cIemThenRemInstructions = 0;
2221 }
2222 }
2223 else
2224 {
2225 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2226 pVCpu->em.s.enmState = EMSTATE_REM;
2227 }
2228 break;
2229
2230 /*
2231 * Resume.
2232 */
2233 case VINF_EM_RESUME:
2234 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2235 /* Don't reschedule in the halted or wait for SIPI case. */
2236 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2237 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2238 {
2239 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2240 break;
2241 }
2242 /* fall through and get scheduled. */
2243
2244 /*
2245 * Reschedule.
2246 */
2247 case VINF_EM_RESCHEDULE:
2248 {
2249 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2250 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2251 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2252 pVCpu->em.s.cIemThenRemInstructions = 0;
2253 pVCpu->em.s.enmState = enmState;
2254 break;
2255 }
2256
2257 /*
2258 * Halted.
2259 */
2260 case VINF_EM_HALT:
2261 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2262 pVCpu->em.s.enmState = EMSTATE_HALTED;
2263 break;
2264
2265 /*
2266 * Switch to the wait for SIPI state (application processor only)
2267 */
2268 case VINF_EM_WAIT_SIPI:
2269 Assert(pVCpu->idCpu != 0);
2270 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2271 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2272 break;
2273
2274
2275 /*
2276 * Suspend.
2277 */
2278 case VINF_EM_SUSPEND:
2279 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2280 Assert(enmOldState != EMSTATE_SUSPENDED);
2281 pVCpu->em.s.enmPrevState = enmOldState;
2282 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2283 break;
2284
2285 /*
2286 * Reset.
2287 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2288 */
2289 case VINF_EM_RESET:
2290 {
2291 if (pVCpu->idCpu == 0)
2292 {
2293 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2294 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2295 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2296 pVCpu->em.s.cIemThenRemInstructions = 0;
2297 pVCpu->em.s.enmState = enmState;
2298 }
2299 else
2300 {
2301 /* All other VCPUs go into the wait for SIPI state. */
2302 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2303 }
2304 break;
2305 }
2306
2307 /*
2308 * Power Off.
2309 */
2310 case VINF_EM_OFF:
2311 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2312 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2313 TMR3NotifySuspend(pVM, pVCpu);
2314 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2315 return rc;
2316
2317 /*
2318 * Terminate the VM.
2319 */
2320 case VINF_EM_TERMINATE:
2321 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2322 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2323 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2324 TMR3NotifySuspend(pVM, pVCpu);
2325 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2326 return rc;
2327
2328
2329 /*
2330 * Out of memory, suspend the VM and stuff.
2331 */
2332 case VINF_EM_NO_MEMORY:
2333 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2334 Assert(enmOldState != EMSTATE_SUSPENDED);
2335 pVCpu->em.s.enmPrevState = enmOldState;
2336 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2337 TMR3NotifySuspend(pVM, pVCpu);
2338 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2339
2340 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2341 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2342 if (rc != VINF_EM_SUSPEND)
2343 {
2344 if (RT_SUCCESS_NP(rc))
2345 {
2346 AssertLogRelMsgFailed(("%Rrc\n", rc));
2347 rc = VERR_EM_INTERNAL_ERROR;
2348 }
2349 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2350 }
2351 return rc;
2352
2353 /*
2354 * Guest debug events.
2355 */
2356 case VINF_EM_DBG_STEPPED:
2357 case VINF_EM_DBG_STOP:
2358 case VINF_EM_DBG_BREAKPOINT:
2359 case VINF_EM_DBG_STEP:
2360 if (enmOldState == EMSTATE_RAW)
2361 {
2362 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2363 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2364 }
2365 else if (enmOldState == EMSTATE_HM)
2366 {
2367 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2368 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2369 }
2370 else if (enmOldState == EMSTATE_REM)
2371 {
2372 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2373 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2374 }
2375 else
2376 {
2377 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2378 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2379 }
2380 break;
2381
2382 /*
2383 * Hypervisor debug events.
2384 */
2385 case VINF_EM_DBG_HYPER_STEPPED:
2386 case VINF_EM_DBG_HYPER_BREAKPOINT:
2387 case VINF_EM_DBG_HYPER_ASSERTION:
2388 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2389 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2390 break;
2391
2392 /*
2393 * Triple fault.
2394 */
2395 case VINF_EM_TRIPLE_FAULT:
2396 if (!pVM->em.s.fGuruOnTripleFault)
2397 {
2398 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2399 Assert(pVM->cCpus == 1);
2400 REMR3Reset(pVM);
2401 PGMR3ResetCpu(pVM, pVCpu);
2402 TRPMR3ResetCpu(pVCpu);
2403 CPUMR3ResetCpu(pVM, pVCpu);
2404 EMR3ResetCpu(pVCpu);
2405 HMR3ResetCpu(pVCpu);
2406 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2407 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d\n", enmOldState, pVCpu->em.s.enmState));
2408 break;
2409 }
2410 /* Else fall through and trigger a guru. */
2411 case VERR_VMM_RING0_ASSERTION:
2412 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2413 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2414 break;
2415
2416 /*
2417 * Any error code showing up here other than the ones we
2418 * know and process above is considered to be FATAL.
2419 *
2420 * Unknown warnings and informational status codes are also
2421 * included in this.
2422 */
2423 default:
2424 if (RT_SUCCESS_NP(rc))
2425 {
2426 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2427 rc = VERR_EM_INTERNAL_ERROR;
2428 }
2429 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2430 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2431 break;
2432 }
2433
2434 /*
2435 * Act on state transition.
2436 */
2437 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2438 if (enmOldState != enmNewState)
2439 {
2440 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2441
2442 /* Clear MWait flags. */
2443 if ( enmOldState == EMSTATE_HALTED
2444 && (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2445 && ( enmNewState == EMSTATE_RAW
2446 || enmNewState == EMSTATE_HM
2447 || enmNewState == EMSTATE_REM
2448 || enmNewState == EMSTATE_IEM_THEN_REM
2449 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2450 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2451 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2452 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2453 {
2454 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2455 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2456 }
2457 }
2458 else
2459 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2460
2461 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2462 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2463
2464 /*
2465 * Act on the new state.
2466 */
2467 switch (enmNewState)
2468 {
2469 /*
2470 * Execute raw.
2471 */
2472 case EMSTATE_RAW:
2473#ifdef VBOX_WITH_RAW_MODE
2474 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2475#else
2476 AssertLogRelMsgFailed(("%Rrc\n", rc));
2477 rc = VERR_EM_INTERNAL_ERROR;
2478#endif
2479 break;
2480
2481 /*
2482 * Execute hardware accelerated raw.
2483 */
2484 case EMSTATE_HM:
2485 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2486 break;
2487
2488 /*
2489 * Execute recompiled.
2490 */
2491 case EMSTATE_REM:
2492 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2493 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2494 break;
2495
2496 /*
2497 * Execute in the interpreter.
2498 */
2499 case EMSTATE_IEM:
2500 {
2501#if 0 /* For testing purposes. */
2502 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2503 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2504 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2505 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2506 rc = VINF_SUCCESS;
2507 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2508#endif
2509 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu));
2510 if (pVM->em.s.fIemExecutesAll)
2511 {
2512 Assert(rc != VINF_EM_RESCHEDULE_REM);
2513 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2514 Assert(rc != VINF_EM_RESCHEDULE_HM);
2515 }
2516 fFFDone = false;
2517 break;
2518 }
2519
2520 /*
2521 * Execute in IEM, hoping we can quickly switch back to HM
2522 * or RAW execution. If our hopes fail, we go to REM.
2523 */
2524 case EMSTATE_IEM_THEN_REM:
2525 {
2526 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2527 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2528 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2529 break;
2530 }
2531
2532 /*
2533 * Application processor execution halted until SIPI.
2534 */
2535 case EMSTATE_WAIT_SIPI:
2536 /* no break */
2537 /*
2538 * hlt - execution halted until interrupt.
2539 */
2540 case EMSTATE_HALTED:
2541 {
2542 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2543 /* If HM (or someone else) stores a pending interrupt in
2544 TRPM, it must be dispatched ASAP without any halting.
2545 Anything pending in TRPM has been accepted and the CPU
2546 should already be in the right state to receive it. */
2547 if (TRPMHasTrap(pVCpu))
2548 rc = VINF_EM_RESCHEDULE;
2549 /* MWAIT has a special extension where it's woken up when
2550 an interrupt is pending even when IF=0. */
2551 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2552 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2553 {
2554 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2555 if ( rc == VINF_SUCCESS
2556 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2557 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2558 {
2559 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2560 rc = VINF_EM_RESCHEDULE;
2561 }
2562 }
2563 else
2564 {
2565 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2566 if ( rc == VINF_SUCCESS
2567 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2568 {
2569 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2570 rc = VINF_EM_RESCHEDULE;
2571 }
2572 }
2573
2574 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2575 break;
2576 }
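
                /* An illustrative aside: EMMWAIT_FLAG_BREAKIRQIF0 models the
                 * architectural MWAIT extension (ECX bit 0) that turns masked
                 * interrupts into break events, which is why the two waits
                 * above are armed differently:
                 *
                 *     VMR3WaitHalted(pVM, pVCpu, false);   // MWAIT ext: IRQs always wake us
                 *     VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
                 *                                          // plain HLT: IRQs ignored while IF=0
                 */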
2577
2578 /*
2579 * Suspended - return to VM.cpp.
2580 */
2581 case EMSTATE_SUSPENDED:
2582 TMR3NotifySuspend(pVM, pVCpu);
2583 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2584 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2585 return VINF_EM_SUSPEND;
2586
2587 /*
2588 * Debugging in the guest.
2589 */
2590 case EMSTATE_DEBUG_GUEST_RAW:
2591 case EMSTATE_DEBUG_GUEST_HM:
2592 case EMSTATE_DEBUG_GUEST_IEM:
2593 case EMSTATE_DEBUG_GUEST_REM:
2594 TMR3NotifySuspend(pVM, pVCpu);
2595 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2596 TMR3NotifyResume(pVM, pVCpu);
2597 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2598 break;
2599
2600 /*
2601 * Debugging in the hypervisor.
2602 */
2603 case EMSTATE_DEBUG_HYPER:
2604 {
2605 TMR3NotifySuspend(pVM, pVCpu);
2606 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2607
2608 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2609 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2610 if (rc != VINF_SUCCESS)
2611 {
2612 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2613 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2614 else
2615 {
2616 /* switch to guru meditation mode */
2617 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2618 VMMR3FatalDump(pVM, pVCpu, rc);
2619 }
2620 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2621 return rc;
2622 }
2623
2624 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2625 TMR3NotifyResume(pVM, pVCpu);
2626 break;
2627 }
2628
2629 /*
2630 * Guru meditation takes place in the debugger.
2631 */
2632 case EMSTATE_GURU_MEDITATION:
2633 {
2634 TMR3NotifySuspend(pVM, pVCpu);
2635 VMMR3FatalDump(pVM, pVCpu, rc);
2636 emR3Debug(pVM, pVCpu, rc);
2637 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2638 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2639 return rc;
2640 }
2641
2642 /*
2643 * The states we don't expect here.
2644 */
2645 case EMSTATE_NONE:
2646 case EMSTATE_TERMINATING:
2647 default:
2648 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2649 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2650 TMR3NotifySuspend(pVM, pVCpu);
2651 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2652 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2653 return VERR_EM_INTERNAL_ERROR;
2654 }
2655 } /* The Outer Main Loop */
2656 }
2657 else
2658 {
2659 /*
2660 * Fatal error.
2661 */
2662 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2663 TMR3NotifySuspend(pVM, pVCpu);
2664 VMMR3FatalDump(pVM, pVCpu, rc);
2665 emR3Debug(pVM, pVCpu, rc);
2666 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2667 /** @todo change the VM state! */
2668 return rc;
2669 }
2670
2671 /* (won't ever get here). */
2672 AssertFailed();
2673}
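
/* A simplified sketch of how the emulation thread is expected to drive the
 * main loop above (a hypothetical rendering; the real caller is the EMT
 * function in VMEmt.cpp):
 *
 *     int rc = EMR3ExecuteVM(pVM, pVCpu);
 *     // Returns only for VINF_EM_OFF / VINF_EM_TERMINATE / VINF_EM_SUSPEND
 *     // or a fatal error (guru meditation); the EMT then waits for the next
 *     // state change before re-entering the VM.
 */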
2674
2675/**
2676 * Notify EM of a suspend state change (used by FTM).
2677 *
2678 * @param pVM Pointer to the VM.
2679 */
2680VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
2681{
2682 PVMCPU pVCpu = VMMGetCpu(pVM);
2683
2684 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
2685 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
2686 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2687 return VINF_SUCCESS;
2688}
2689
2690/**
2691 * Notify EM of a resume state change (used by FTM).
2692 *
2693 * @param pVM Pointer to the VM.
2694 */
2695VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
2696{
2697 PVMCPU pVCpu = VMMGetCpu(pVM);
2698 EMSTATE enmCurState = pVCpu->em.s.enmState;
2699
2700 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
2701 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2702 pVCpu->em.s.enmPrevState = enmCurState;
2703 return VINF_SUCCESS;
2704}