VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@54829

Last change on this file was 54829, checked in by vboxsync, 10 years ago

VMM/EM: redundant include.

/* $Id: EM.cpp 54829 2015-03-18 14:28:10Z vboxsync $ */
/** @file
 * EM - Execution Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_em EM - The Execution Monitor / Manager
 *
 * The Execution Monitor/Manager is responsible for running the VM, scheduling
 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
 * Interpreted), and keeping the CPU states in sync. The function
 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
 * emR3RemExecute).
 *
 * The interpreted execution is only used to avoid switching between
 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
 * The interpretation is thus implemented as part of EM.
 *
 * @see grp_em
 */
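
/*
 * Illustrative sketch (not part of the original file): how an emulation
 * thread (EMT) drives the 'main-loop' named above. Only EMR3ExecuteVM() and
 * the VINF_EM_ status codes are real; emtSketch is a hypothetical helper.
 *
 *      static int emtSketch(PVM pVM, PVMCPU pVCpu)
 *      {
 *          // Blocks until the VM halts, suspends, powers off or fails.
 *          int rc = EMR3ExecuteVM(pVM, pVCpu);
 *          // rc is one of the VINF_EM_ codes (e.g. VINF_EM_TERMINATE),
 *          // or a fatal status after a guru meditation.
 *          return rc;
 *      }
 */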

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#include <VBox/vmm/em.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmqueue.h>
#include <VBox/vmm/hm.h>
#include "EMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include "VMMTracing.h"

#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/stream.h>
#include <iprt/thread.h>


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
#define EM_NOTIFY_HM
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
static const char *emR3GetStateName(EMSTATE enmState);
#endif
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);


/**
 * Initializes the EM.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
{
    LogFlow(("EMR3Init\n"));
    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, em.s, 32);
    AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
    AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));

    /*
     * Init the structure.
     */
    pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
    PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
    PCFGMNODE pCfgEM   = CFGMR3GetChild(pCfgRoot, "EM");

    bool fEnabled;
    int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileUser = !fEnabled;

    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileSupervisor = !fEnabled;

#ifdef VBOX_WITH_RAW_RING1
    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
    AssertLogRelRCReturn(rc, rc);
#else
    pVM->fRawRing1Enabled = false; /* Disabled by default. */
#endif

    rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
    AssertLogRelRCReturn(rc, rc);

    rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
    AssertLogRelRCReturn(rc, rc);
    pVM->em.s.fGuruOnTripleFault = !fEnabled;
    if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
    {
        LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
        pVM->em.s.fGuruOnTripleFault = true;
    }

    Log(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
         pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));

#ifdef VBOX_WITH_REM
    /*
     * Initialize the REM critical section.
     */
    AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
    rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
    AssertRCReturn(rc, rc);
#endif

    /*
     * Saved state.
     */
    rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
                               NULL, NULL, NULL,
                               NULL, emR3Save, NULL,
                               NULL, emR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->em.s.enmState     = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
        pVCpu->em.s.enmPrevState = EMSTATE_NONE;
        pVCpu->em.s.fForceRAW    = false;

        pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
#ifdef VBOX_WITH_RAW_MODE
        if (!HMIsEnabled(pVM))
        {
            pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
            AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
        }
#endif

        /* Force reset of the time slice. */
        pVCpu->em.s.u64TimeSliceStart = 0;

# define EM_REG_COUNTER(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_COUNTER_USED(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE_ADV(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

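        /* Illustration (not in the original source): an invocation like
         *      EM_REG_COUNTER(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "desc");
         * expands to
         *      rc = STAMR3RegisterF(pVM, &pStats->StatRZAnd, STAMTYPE_COUNTER,
         *                           STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
         *                           "desc", "/EM/CPU%d/RZ/Interpret/Success/And", i);
         *      AssertRC(rc);
         * so the per-CPU index 'i' is formatted into the %d of each STAM name. */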
        /*
         * Statistics.
         */
#ifdef VBOX_WITH_STATISTICS
        PEMSTATS pStats;
        rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->em.s.pStatsR3 = pStats;
        pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
        pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);

        EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
        EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");

        EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
        EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");

        EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
        EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of instruction prefixes.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of instruction prefixes.");

        EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
# ifdef VBOX_WITH_FIRST_IEM_STEP
        EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
# else
        EM_REG_COUNTER_USED(&pStats->StatIn, "/EM/CPU%d/R3/PrivInst/In", "Number of in instructions.");
        EM_REG_COUNTER_USED(&pStats->StatOut, "/EM/CPU%d/R3/PrivInst/Out", "Number of out instructions.");
# endif
        EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
        EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
        EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");

        EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
        pVCpu->em.s.pCliStatTree = 0;

        /* These should be considered for release statistics. */
        EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
        EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHmEntry, "/PROF/CPU%d/EM/HmEnter", "Profiling Hardware Accelerated Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHmExec, "/PROF/CPU%d/EM/HmExec", "Profiling Hardware Accelerated Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");

#endif /* VBOX_WITH_STATISTICS */

        EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
        EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
        EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");

        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
    }

    emR3InitDbg(pVM);
    return VINF_SUCCESS;
}
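
/* Usage note (an assumption based on the CFGM keys queried above, not text
   from the original file): the keys live under the CFGM root and its "EM"
   child node, so they can be set from the host shell via extradata, e.g.:

        VBoxManage setextradata <vmname> VBoxInternal/EM/IemExecutesAll 1
        VBoxManage setextradata <vmname> VBoxInternal/EM/TripleFaultReset 1

   The VBoxInternal/ prefix maps onto the tree walked by CFGMR3GetRoot() and
   CFGMR3GetChild() in EMR3Init() above; RawR3Enabled and RawR0Enabled sit at
   the root, i.e. VBoxInternal/RawR3Enabled. */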


/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM     Pointer to the VM.
 */
VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
{
    LogFlow(("EMR3Relocate\n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        if (pVCpu->em.s.pStatsR3)
            pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
    }
}


/**
 * Reset the EM state for a CPU.
 *
 * Called by EMR3Reset and hot plugging.
 *
 * @param   pVCpu   Pointer to the VMCPU.
 */
VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
{
    pVCpu->em.s.fForceRAW = false;

    /* VMR3Reset may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
       out of the HALTED state here so that enmPrevState doesn't end up as
       HALTED when EMR3Execute returns. */
    if (pVCpu->em.s.enmState == EMSTATE_HALTED)
    {
        Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
        pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
    }
}


/**
 * Reset notification.
 *
 * @param   pVM     Pointer to the VM.
 */
VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
{
    Log(("EMR3Reset: \n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        EMR3ResetCpu(&pVM->aCpus[i]);
}


/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources; the VM itself is
 * at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
{
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

#ifdef VBOX_WITH_REM
    PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
#endif
    return VINF_SUCCESS;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
        AssertRCReturn(rc, rc);

        Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
        Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
        AssertRCReturn(rc, rc);

        /* Save mwait state. */
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}
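
/* Layout note (derived from emR3Save and emR3Load, not in the original file):
   each VCPU contributes, in ascending CPU id order:

        bool     fForceRAW
        uint32_t enmPrevState      (units newer than EM_SAVED_STATE_VERSION_PRE_SMP)
        uint32_t MWait.fWait       (units newer than EM_SAVED_STATE_VERSION_PRE_MWAIT)
        RTGCPTR  MWait.uMWaitRAX, uMWaitRCX, uMonitorRAX, uMonitorRCX, uMonitorRDX

   emR3Load must consume exactly this sequence per VCPU or the saved state
   unit gets out of sync. */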


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    /*
     * Validate version.
     */
    if (   uVersion > EM_SAVED_STATE_VERSION
        || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
    {
        AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Load the saved state.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;
        AssertRCReturn(rc, rc);

        if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
        {
            AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
            rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
            AssertRCReturn(rc, rc);
            Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);

            pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
        }
        if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
        {
            /* Load mwait state. */
            rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
            AssertRCReturn(rc, rc);
        }

        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return VINF_SUCCESS;
}


/**
 * Argument packet for emR3SetExecutionPolicy.
 */
struct EMR3SETEXECPOLICYARGS
{
    EMEXECPOLICY enmPolicy;
    bool         fEnforce;
};


/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
 */
static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /*
     * Only the first CPU changes the variables.
     */
    if (pVCpu->idCpu == 0)
    {
        struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
        switch (pArgs->enmPolicy)
        {
            case EMEXECPOLICY_RECOMPILE_RING0:
                pVM->fRecompileSupervisor = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_RECOMPILE_RING3:
                pVM->fRecompileUser = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_IEM_ALL:
                pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
                break;
            default:
                AssertFailedReturn(VERR_INVALID_PARAMETER);
        }
        Log(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
             pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
    }

    /*
     * Force rescheduling if in RAW, HM, IEM, or REM.
     */
    return    pVCpu->em.s.enmState == EMSTATE_RAW
           || pVCpu->em.s.enmState == EMSTATE_HM
           || pVCpu->em.s.enmState == EMSTATE_IEM
           || pVCpu->em.s.enmState == EMSTATE_REM
           || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
         ? VINF_EM_RESCHEDULE
         : VINF_SUCCESS;
}


/**
 * Changes an execution scheduling policy parameter.
 *
 * This is used to enable or disable raw-mode / hardware-virtualization
 * execution of user and supervisor code.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to change.
 * @param   fEnforce    Whether to enforce the policy or not.
 */
VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);

    struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
    return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
}
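
/* Example (illustrative, not part of the original file): forcing all guest
   code through the interpreter on a live VM, given a user mode VM handle:

        int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
        AssertRC(rc);

   EMTs currently executing in RAW/HM/IEM/REM pick the change up through the
   VINF_EM_RESCHEDULE returned by the rendezvous callback above. */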


/**
 * Queries an execution scheduling policy parameter.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to query.
 * @param   pfEnforced  Where to return the current value.
 */
VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
{
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /* No need to bother EMTs with a query. */
    switch (enmPolicy)
    {
        case EMEXECPOLICY_RECOMPILE_RING0:
            *pfEnforced = pVM->fRecompileSupervisor;
            break;
        case EMEXECPOLICY_RECOMPILE_RING3:
            *pfEnforced = pVM->fRecompileUser;
            break;
        case EMEXECPOLICY_IEM_ALL:
            *pfEnforced = pVM->em.s.fIemExecutesAll;
            break;
        default:
            AssertFailedReturn(VERR_INTERNAL_ERROR_2);
    }

    return VINF_SUCCESS;
}
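
/* Example (illustrative): reading a policy back and toggling it:

        bool fIemAll;
        int rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fIemAll);
        if (RT_SUCCESS(rc))
            rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, !fIemAll);
 */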


/**
 * Raise a fatal error.
 *
 * Safely terminates the VM with a full state report; this function will
 * naturally never return.
 *
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   rc      VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
    AssertReleaseMsgFailed(("longjmp returned!\n"));
}
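
/* The setjmp matching the longjmp above lives in EMR3ExecuteVM(), which is
   where execution resumes with the failure status and the guru meditation
   gets reported (a sketch based on the FatalLongJump member above, not a
   verbatim quote of that function):

        int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
        if (rc != 0)
            return rc;   // EMR3FatalError() was invoked somewhere below us.
 */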


#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
/**
 * Gets the EM state name.
 *
 * @returns Pointer to a read-only state name.
 * @param   enmState    The state.
 */
static const char *emR3GetStateName(EMSTATE enmState)
{
    switch (enmState)
    {
        case EMSTATE_NONE:              return "EMSTATE_NONE";
        case EMSTATE_RAW:               return "EMSTATE_RAW";
        case EMSTATE_HM:                return "EMSTATE_HM";
        case EMSTATE_IEM:               return "EMSTATE_IEM";
        case EMSTATE_REM:               return "EMSTATE_REM";
        case EMSTATE_HALTED:            return "EMSTATE_HALTED";
        case EMSTATE_WAIT_SIPI:         return "EMSTATE_WAIT_SIPI";
        case EMSTATE_SUSPENDED:         return "EMSTATE_SUSPENDED";
        case EMSTATE_TERMINATING:       return "EMSTATE_TERMINATING";
        case EMSTATE_DEBUG_GUEST_RAW:   return "EMSTATE_DEBUG_GUEST_RAW";
        case EMSTATE_DEBUG_GUEST_HM:    return "EMSTATE_DEBUG_GUEST_HM";
        case EMSTATE_DEBUG_GUEST_IEM:   return "EMSTATE_DEBUG_GUEST_IEM";
        case EMSTATE_DEBUG_GUEST_REM:   return "EMSTATE_DEBUG_GUEST_REM";
        case EMSTATE_DEBUG_HYPER:       return "EMSTATE_DEBUG_HYPER";
        case EMSTATE_GURU_MEDITATION:   return "EMSTATE_GURU_MEDITATION";
        case EMSTATE_IEM_THEN_REM:      return "EMSTATE_IEM_THEN_REM";
        default:                        return "Unknown!";
    }
}
#endif /* LOG_ENABLED || VBOX_STRICT */


/**
 * Debug loop.
 *
 * @returns VBox status code for EM.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   rc      Current EM VBox status code.
 */
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
        const VBOXSTRICTRC rcLast = rc;

        /*
         * Debug related RC.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (   pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    || pVCpu->em.s.fForceRAW /* paranoia */)
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawStep(pVM, pVCpu);
#else
                    AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
#endif
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
                    rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
#ifdef VBOX_WITH_REM
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
                    rc = emR3RemStep(pVM, pVCpu);
#endif
                else
                {
                    rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
                    if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
                        rc = VINF_EM_DBG_STEPPED;
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                RTLogFlush(NULL);
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but add dedicated error codes! */
            {
                LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
            }
        }

        /*
         * Process the result.
         */
        do
        {
            switch (VBOXSTRICTRC_VAL(rc))
            {
                /*
                 * Continue the debugging loop.
                 */
                case VINF_EM_DBG_STEP:
                case VINF_EM_DBG_STOP:
                case VINF_EM_DBG_STEPPED:
                case VINF_EM_DBG_BREAKPOINT:
                case VINF_EM_DBG_HYPER_STEPPED:
                case VINF_EM_DBG_HYPER_BREAKPOINT:
                case VINF_EM_DBG_HYPER_ASSERTION:
                    break;

                /*
                 * Resuming execution (in some form) has to be done here if we got
                 * a hypervisor debug event.
                 */
                case VINF_SUCCESS:
                case VINF_EM_RESUME:
                case VINF_EM_SUSPEND:
                case VINF_EM_RESCHEDULE:
                case VINF_EM_RESCHEDULE_RAW:
                case VINF_EM_RESCHEDULE_REM:
                case VINF_EM_HALT:
                    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                    {
#ifdef VBOX_WITH_RAW_MODE
                        rc = emR3RawResumeHyper(pVM, pVCpu);
                        if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                            continue;
#else
                        AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
#endif
                    }
                    if (rc == VINF_SUCCESS)
                        rc = VINF_EM_RESCHEDULE;
                    return rc;

                /*
                 * The debugger isn't attached.
                 * We'll simply turn the thing off since that's the easiest thing to do.
                 */
                case VERR_DBGF_NOT_ATTACHED:
                    switch (VBOXSTRICTRC_VAL(rcLast))
                    {
                        case VINF_EM_DBG_HYPER_STEPPED:
                        case VINF_EM_DBG_HYPER_BREAKPOINT:
                        case VINF_EM_DBG_HYPER_ASSERTION:
                        case VERR_TRPM_PANIC:
                        case VERR_TRPM_DONT_PANIC:
                        case VERR_VMM_RING0_ASSERTION:
                        case VERR_VMM_HYPER_CR3_MISMATCH:
                        case VERR_VMM_RING3_CALL_DISABLED:
                            return rcLast;
                    }
                    return VINF_EM_OFF;

                /*
                 * Status codes terminating the VM in one or another sense.
                 */
                case VINF_EM_TERMINATE:
                case VINF_EM_OFF:
                case VINF_EM_RESET:
                case VINF_EM_NO_MEMORY:
                case VINF_EM_RAW_STALE_SELECTOR:
                case VINF_EM_RAW_IRET_TRAP:
                case VERR_TRPM_PANIC:
                case VERR_TRPM_DONT_PANIC:
                case VERR_IEM_INSTR_NOT_IMPLEMENTED:
                case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
                case VERR_VMM_RING0_ASSERTION:
                case VERR_VMM_HYPER_CR3_MISMATCH:
                case VERR_VMM_RING3_CALL_DISABLED:
                case VERR_INTERNAL_ERROR:
                case VERR_INTERNAL_ERROR_2:
                case VERR_INTERNAL_ERROR_3:
                case VERR_INTERNAL_ERROR_4:
                case VERR_INTERNAL_ERROR_5:
                case VERR_IPE_UNEXPECTED_STATUS:
                case VERR_IPE_UNEXPECTED_INFO_STATUS:
                case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                    return rc;

                /*
                 * The rest is unexpected, and will keep us here.
                 */
                default:
                    AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
                    break;
            }
        } while (false);
    } /* debug for ever */
}


/**
 * Steps recompiled code.
 *
 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
 *          VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
    Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

#ifdef VBOX_WITH_REM
    EMRemLock(pVM);

    /*
     * Switch to REM, step instruction, switch back.
     */
    int rc = REMR3State(pVM, pVCpu);
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);
        REMR3StateBack(pVM, pVCpu);
    }
    EMRemUnlock(pVM);

#else
    int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
#endif

    Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
    return rc;
}


/**
 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
 * critical section.
 *
 * @returns false - new fInREMState value.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
{
#ifdef VBOX_WITH_REM
    STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
    REMR3StateBack(pVM, pVCpu);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);

    EMRemUnlock(pVM);
#endif
    return false;
}
1044
1045
1046/**
1047 * Executes recompiled code.
1048 *
1049 * This function contains the recompiler version of the inner
1050 * execution loop (the outer loop being in EMR3ExecuteVM()).
1051 *
1052 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1053 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1054 *
1055 * @param pVM Pointer to the VM.
1056 * @param pVCpu Pointer to the VMCPU.
1057 * @param pfFFDone Where to store an indicator telling whether or not
1058 * FFs were done before returning.
1059 *
1060 */
1061static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1062{
1063#ifdef LOG_ENABLED
1064 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1065 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1066
1067 if (pCtx->eflags.Bits.u1VM)
1068 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
1069 else
1070 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
1071#endif
1072 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1073
1074#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1075 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1076 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1077 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1078#endif
1079
1080 /*
1081 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1082 * or the REM suggests raw-mode execution.
1083 */
1084 *pfFFDone = false;
1085#ifdef VBOX_WITH_REM
1086 bool fInREMState = false;
1087#endif
1088 int rc = VINF_SUCCESS;
1089 for (;;)
1090 {
1091#ifdef VBOX_WITH_REM
1092 /*
1093 * Lock REM and update the state if not already in sync.
1094 *
1095 * Note! Big lock, but you are not supposed to own any lock when
1096 * coming in here.
1097 */
1098 if (!fInREMState)
1099 {
1100 EMRemLock(pVM);
1101 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1102
1103 /* Flush the recompiler translation blocks if the VCPU has changed,
1104 also force a full CPU state resync. */
1105 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1106 {
1107 REMFlushTBs(pVM);
1108 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1109 }
1110 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1111
1112 rc = REMR3State(pVM, pVCpu);
1113
1114 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1115 if (RT_FAILURE(rc))
1116 break;
1117 fInREMState = true;
1118
1119 /*
1120 * We might have missed the raising of VMREQ, TIMER and some other
1121 * important FFs while we were busy switching the state. So, check again.
1122 */
1123 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1124 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1125 {
1126 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1127 goto l_REMDoForcedActions;
1128 }
1129 }
1130#endif
1131
1132 /*
1133 * Execute REM.
1134 */
1135 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1136 {
1137 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1138#ifdef VBOX_WITH_REM
1139 rc = REMR3Run(pVM, pVCpu);
1140#else
1141 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu));
1142#endif
1143 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1144 }
1145 else
1146 {
1147 /* Give up this time slice; virtual time continues */
1148 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1149 RTThreadSleep(5);
1150 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1151 rc = VINF_SUCCESS;
1152 }
1153
1154 /*
1155 * Deal with high priority post execution FFs before doing anything
1156 * else. Sync back the state and leave the lock to be on the safe side.
1157 */
1158 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1159 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1160 {
1161#ifdef VBOX_WITH_REM
1162 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1163#endif
1164 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1165 }
1166
1167 /*
1168 * Process the returned status code.
1169 */
1170 if (rc != VINF_SUCCESS)
1171 {
1172 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1173 break;
1174 if (rc != VINF_REM_INTERRUPED_FF)
1175 {
1176 /*
1177 * Anything which is not known to us means an internal error
1178 * and the termination of the VM!
1179 */
1180 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1181 break;
1182 }
1183 }
1184
1185
1186 /*
1187 * Check and execute forced actions.
1188 *
1189 * Sync back the VM state and leave the lock before calling any of
1190         * these; you never know what's going to happen here.
1191 */
1192#ifdef VBOX_HIGH_RES_TIMERS_HACK
1193 TMTimerPollVoid(pVM, pVCpu);
1194#endif
1195 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1196 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1197 || VMCPU_FF_IS_PENDING(pVCpu,
1198 VMCPU_FF_ALL_REM_MASK
1199 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1200 {
1201l_REMDoForcedActions:
1202#ifdef VBOX_WITH_REM
1203 if (fInREMState)
1204 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1205#endif
1206 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1207 rc = emR3ForcedActions(pVM, pVCpu, rc);
1208 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1209 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1210 if ( rc != VINF_SUCCESS
1211 && rc != VINF_EM_RESCHEDULE_REM)
1212 {
1213 *pfFFDone = true;
1214 break;
1215 }
1216 }
1217
1218 } /* The Inner Loop, recompiled execution mode version. */
1219
1220
1221#ifdef VBOX_WITH_REM
1222 /*
1223 * Returning. Sync back the VM state if required.
1224 */
1225 if (fInREMState)
1226 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1227#endif
1228
1229 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1230 return rc;
1231}
1232
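/*
 * A compact recap of the loop above: (1) take the REM lock and sync the
 * guest state in with REMR3State, (2) run via REMR3Run (IEMExecLots when
 * built without VBOX_WITH_REM) unless the execution cap says to sleep,
 * (3) sync back and handle high priority post-execution FFs, (4) break
 * out on any status in the EM range or on anything other than
 * VINF_REM_INTERRUPED_FF, and (5) service the ALL_REM_MASK forced
 * actions before going around again.
 */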
1233
1234#ifdef DEBUG
1235
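/**
 * Debug-build helper that single steps up to cIterations instructions in
 * REM, disassembling and logging each one, and stops early once
 * emR3Reschedule no longer selects EMSTATE_REM.
 *
 * @returns VINF_EM_RESCHEDULE.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param cIterations The maximum number of instructions to step.
 */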
1236int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1237{
1238 EMSTATE enmOldState = pVCpu->em.s.enmState;
1239
1240 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1241
1242 Log(("Single step BEGIN:\n"));
1243 for (uint32_t i = 0; i < cIterations; i++)
1244 {
1245 DBGFR3PrgStep(pVCpu);
1246 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1247 emR3RemStep(pVM, pVCpu);
1248 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1249 break;
1250 }
1251 Log(("Single step END:\n"));
1252 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1253 pVCpu->em.s.enmState = enmOldState;
1254 return VINF_EM_RESCHEDULE;
1255}
1256
1257#endif /* DEBUG */
1258
1259
1260/**
1261 * Try to execute the problematic code in IEM first, then fall back on REM if
1262 * there is too much of it or if IEM doesn't implement something.
1263 *
1264 * @returns Strict VBox status code from IEMExecLots.
1265 * @param pVM The cross context VM structure.
1266 * @param pVCpu The cross context CPU structure for the calling EMT.
1267 * @param pfFFDone Force flags done indicator.
1268 *
1269 * @thread EMT(pVCpu)
1270 */
1271static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1272{
1273 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1274 *pfFFDone = false;
1275
1276 /*
1277 * Execute in IEM for a while.
1278 */
1279 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1280 {
1281 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu);
1282 if (rcStrict != VINF_SUCCESS)
1283 {
1284 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1285 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1286 break;
1287
1288 pVCpu->em.s.cIemThenRemInstructions++;
1289 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1290 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1291 return rcStrict;
1292 }
1293 pVCpu->em.s.cIemThenRemInstructions++;
1294
1295 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1296 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1297 {
1298 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1299 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1300 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1301 pVCpu->em.s.enmState = enmNewState;
1302 return VINF_SUCCESS;
1303 }
1304
1305 /*
1306 * Check for pending actions.
1307 */
1308 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1309 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1310 return VINF_SUCCESS;
1311 }
1312
1313 /*
1314 * Switch to REM.
1315 */
1316 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1317 pVCpu->em.s.enmState = EMSTATE_REM;
1318 return VINF_SUCCESS;
1319}
1320
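/*
 * Example flow (a sketch, not a captured trace): starting with
 * cIemThenRemInstructions at zero, the loop above gives IEM up to 1024
 * iterations; a VERR_IEM_ASPECT/INSTR_NOT_IMPLEMENTED return or an
 * exhausted budget drops the VCPU to EMSTATE_REM, while a friendlier
 * emR3Reschedule verdict switches straight back to HM or raw-mode
 * execution.
 */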
1321
1322/**
1323 * Decides whether to execute RAW, HWACC or REM.
1324 *
1325 * @returns new EM state
1326 * @param pVM Pointer to the VM.
1327 * @param pVCpu Pointer to the VMCPU.
1328 * @param pCtx Pointer to the guest CPU context.
1329 */
1330EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1331{
1332 /*
1333 * When forcing raw-mode execution, things are simple.
1334 */
1335 if (pVCpu->em.s.fForceRAW)
1336 return EMSTATE_RAW;
1337
1338 /*
1339 * We stay in the wait for SIPI state unless explicitly told otherwise.
1340 */
1341 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1342 return EMSTATE_WAIT_SIPI;
1343
1344 /*
1345 * Execute everything in IEM?
1346 */
1347 if (pVM->em.s.fIemExecutesAll)
1348 return EMSTATE_IEM;
1349
1350 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1351 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1352 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1353
1354 X86EFLAGS EFlags = pCtx->eflags;
1355 if (HMIsEnabled(pVM))
1356 {
1357 /*
1358 * Hardware accelerated raw-mode:
1359 */
1360 if ( EMIsHwVirtExecutionEnabled(pVM)
1361 && HMR3CanExecuteGuest(pVM, pCtx))
1362 return EMSTATE_HM;
1363
1364 /*
1365 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1366 * turns off monitoring features essential for raw mode!
1367 */
1368#ifdef VBOX_WITH_FIRST_IEM_STEP
1369 return EMSTATE_IEM_THEN_REM;
1370#else
1371 return EMSTATE_REM;
1372#endif
1373 }
1374
1375 /*
1376 * Standard raw-mode:
1377 *
1378     * Here we only support 16 & 32-bit protected mode ring-3 code that has no
1379     * I/O privileges, or 32-bit protected mode ring-0 code.
1380 *
1381 * The tests are ordered by the likelihood of being true during normal execution.
1382 */
1383 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1384 {
1385 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1386 return EMSTATE_REM;
1387 }
1388
1389# ifndef VBOX_RAW_V86
1390 if (EFlags.u32 & X86_EFL_VM) {
1391 Log2(("raw mode refused: VM_MASK\n"));
1392 return EMSTATE_REM;
1393 }
1394# endif
1395
1396 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1397 uint32_t u32CR0 = pCtx->cr0;
1398 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1399 {
1400 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1401 return EMSTATE_REM;
1402 }
1403
1404 if (pCtx->cr4 & X86_CR4_PAE)
1405 {
1406 uint32_t u32Dummy, u32Features;
1407
1408 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1409 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1410 return EMSTATE_REM;
1411 }
1412
1413 unsigned uSS = pCtx->ss.Sel;
1414 if ( pCtx->eflags.Bits.u1VM
1415 || (uSS & X86_SEL_RPL) == 3)
1416 {
1417 if (!EMIsRawRing3Enabled(pVM))
1418 return EMSTATE_REM;
1419
1420 if (!(EFlags.u32 & X86_EFL_IF))
1421 {
1422 Log2(("raw mode refused: IF (RawR3)\n"));
1423 return EMSTATE_REM;
1424 }
1425
1426 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1427 {
1428 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1429 return EMSTATE_REM;
1430 }
1431 }
1432 else
1433 {
1434 if (!EMIsRawRing0Enabled(pVM))
1435 return EMSTATE_REM;
1436
1437 if (EMIsRawRing1Enabled(pVM))
1438 {
1439 /* Only ring 0 and 1 supervisor code. */
1440 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1441 {
1442 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1443 return EMSTATE_REM;
1444 }
1445 }
1446 /* Only ring 0 supervisor code. */
1447 else if ((uSS & X86_SEL_RPL) != 0)
1448 {
1449 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1450 return EMSTATE_REM;
1451 }
1452
1453 // Let's start with pure 32 bits ring 0 code first
1454 /** @todo What's pure 32-bit mode? flat? */
1455 if ( !(pCtx->ss.Attr.n.u1DefBig)
1456 || !(pCtx->cs.Attr.n.u1DefBig))
1457 {
1458 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1459 return EMSTATE_REM;
1460 }
1461
1462 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1463 if (!(u32CR0 & X86_CR0_WP))
1464 {
1465 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1466 return EMSTATE_REM;
1467 }
1468
1469# ifdef VBOX_WITH_RAW_MODE
1470 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1471 {
1472 Log2(("raw r0 mode forced: patch code\n"));
1473# ifdef VBOX_WITH_SAFE_STR
1474 Assert(pCtx->tr.Sel);
1475# endif
1476 return EMSTATE_RAW;
1477 }
1478# endif /* VBOX_WITH_RAW_MODE */
1479
1480# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1481 if (!(EFlags.u32 & X86_EFL_IF))
1482 {
1483 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1484 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1485 return EMSTATE_REM;
1486 }
1487# endif
1488
1489# ifndef VBOX_WITH_RAW_RING1
1490 /** @todo still necessary??? */
1491 if (EFlags.Bits.u2IOPL != 0)
1492 {
1493 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1494 return EMSTATE_REM;
1495 }
1496# endif
1497 }
1498
1499 /*
1500     * Stale hidden selectors mean raw-mode is unsafe (being very careful).
1501 */
1502 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1503 {
1504 Log2(("raw mode refused: stale CS\n"));
1505 return EMSTATE_REM;
1506 }
1507 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1508 {
1509 Log2(("raw mode refused: stale SS\n"));
1510 return EMSTATE_REM;
1511 }
1512 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1513 {
1514 Log2(("raw mode refused: stale DS\n"));
1515 return EMSTATE_REM;
1516 }
1517 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1518 {
1519 Log2(("raw mode refused: stale ES\n"));
1520 return EMSTATE_REM;
1521 }
1522 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1523 {
1524 Log2(("raw mode refused: stale FS\n"));
1525 return EMSTATE_REM;
1526 }
1527 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1528 {
1529 Log2(("raw mode refused: stale GS\n"));
1530 return EMSTATE_REM;
1531 }
1532
1533# ifdef VBOX_WITH_SAFE_STR
1534 if (pCtx->tr.Sel == 0)
1535 {
1536 Log(("Raw mode refused -> TR=0\n"));
1537 return EMSTATE_REM;
1538 }
1539# endif
1540
1541 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1542 return EMSTATE_RAW;
1543}
1544
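/*
 * Summing up the checks above: raw-mode execution requires CR0.PE+PG set,
 * EFLAGS.TF clear (and normally EFLAGS.VM too), guest PAE only if CPUID
 * advertises it, ring-3 code only with raw ring-3 enabled and IF set, and
 * ring-0 (or ring-1 with raw ring-1 enabled) code only with 32-bit
 * (D bit) CS/SS, CR0.WP set, IF set and IOPL 0, plus fresh hidden
 * selector registers and, with VBOX_WITH_SAFE_STR, a non-zero TR.
 * Everything else is sent to the recompiler.
 */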
1545
1546/**
1547 * Executes all high priority post execution force actions.
1548 *
1549 * @returns rc or a fatal status code.
1550 *
1551 * @param pVM Pointer to the VM.
1552 * @param pVCpu Pointer to the VMCPU.
1553 * @param rc The current rc.
1554 */
1555int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1556{
1557 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1558
1559 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1560 PDMCritSectBothFF(pVCpu);
1561
1562 /* Update CR3 (Nested Paging case for HM). */
1563 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1564 {
1565 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1566 if (RT_FAILURE(rc2))
1567 return rc2;
1568 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1569 }
1570
1571 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1572 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1573 {
1574 if (CPUMIsGuestInPAEMode(pVCpu))
1575 {
1576 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1577 AssertPtr(pPdpes);
1578
1579 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1580 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1581 }
1582 else
1583 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1584 }
1585
1586#ifdef VBOX_WITH_RAW_MODE
1587 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1588 CSAMR3DoPendingAction(pVM, pVCpu);
1589#endif
1590
1591 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1592 {
1593 if ( rc > VINF_EM_NO_MEMORY
1594 && rc <= VINF_EM_LAST)
1595 rc = VINF_EM_NO_MEMORY;
1596 }
1597
1598 return rc;
1599}
1600
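/*
 * In practice the function above boils down to: leave any contended PDM
 * critical sections, refresh CR3 and the PAE PDPEs after a nested paging
 * exit, run deferred CSAM work in raw mode, and clamp informational
 * statuses down to VINF_EM_NO_MEMORY while VM_FF_PGM_NO_MEMORY is
 * pending.
 */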
1601
1602/**
1603 * Executes all pending forced actions.
1604 *
1605 * Forced actions can cause execution delays and execution
1606 * rescheduling. The first we deal with using action priority, so
1607 * that for instance pending timers aren't scheduled and run until
1608 * right before execution. The rescheduling we deal with using
1609 * return codes. The same goes for VM termination, only in that case
1610 * we exit everything.
1611 *
1612 * @returns VBox status code of equal or greater importance/severity than rc.
1613 * The most important ones are: VINF_EM_RESCHEDULE,
1614 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1615 *
1616 * @param pVM Pointer to the VM.
1617 * @param pVCpu Pointer to the VMCPU.
1618 * @param rc The current rc.
1619 *
1620 */
1621int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1622{
1623 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1624#ifdef VBOX_STRICT
1625 int rcIrq = VINF_SUCCESS;
1626#endif
1627 int rc2;
1628#define UPDATE_RC() \
1629 do { \
1630 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1631 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1632 break; \
1633 if (!rc || rc2 < rc) \
1634 rc = rc2; \
1635 } while (0)
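    /* A recap of UPDATE_RC() for the reader: an error already in rc sticks,
       a VINF_SUCCESS rc2 changes nothing, and otherwise the numerically
       smaller (i.e. more important) code wins. Example: with rc holding
       VINF_EM_RESCHEDULE, an FF handler returning VINF_EM_SUSPEND changes
       rc to VINF_EM_SUSPEND, the EM status codes being ordered with the
       most important ones lowest. */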
1636 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1637
1638 /*
1639 * Post execution chunk first.
1640 */
1641 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1642 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1643 {
1644 /*
1645 * EMT Rendezvous (must be serviced before termination).
1646 */
1647 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1648 {
1649 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1650 UPDATE_RC();
1651 /** @todo HACK ALERT! The following test is to make sure EM+TM
1652 * thinks the VM is stopped/reset before the next VM state change
1653 * is made. We need a better solution for this, or at least make it
1654 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1655 * VINF_EM_SUSPEND). */
1656 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1657 {
1658 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1659 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1660 return rc;
1661 }
1662 }
1663
1664 /*
1665 * State change request (cleared by vmR3SetStateLocked).
1666 */
1667 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1668 {
1669 VMSTATE enmState = VMR3GetState(pVM);
1670 switch (enmState)
1671 {
1672 case VMSTATE_FATAL_ERROR:
1673 case VMSTATE_FATAL_ERROR_LS:
1674 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1675 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1676 return VINF_EM_SUSPEND;
1677
1678 case VMSTATE_DESTROYING:
1679 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1680 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1681 return VINF_EM_TERMINATE;
1682
1683 default:
1684 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1685 }
1686 }
1687
1688 /*
1689 * Debugger Facility polling.
1690 */
1691 if (VM_FF_IS_PENDING(pVM, VM_FF_DBGF))
1692 {
1693 rc2 = DBGFR3VMMForcedAction(pVM);
1694 UPDATE_RC();
1695 }
1696
1697 /*
1698 * Postponed reset request.
1699 */
1700 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1701 {
1702 rc2 = VMR3Reset(pVM->pUVM);
1703 UPDATE_RC();
1704 }
1705
1706#ifdef VBOX_WITH_RAW_MODE
1707 /*
1708 * CSAM page scanning.
1709 */
1710 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1711 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1712 {
1713 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1714
1715 /** @todo: check for 16 or 32 bits code! (D bit in the code selector) */
1716 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1717
1718 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
1719 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1720 }
1721#endif
1722
1723 /*
1724 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1725 */
1726 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1727 {
1728 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1729 UPDATE_RC();
1730 if (rc == VINF_EM_NO_MEMORY)
1731 return rc;
1732 }
1733
1734 /* check that we got them all */
1735 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1736 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0));
1737 }
1738
1739 /*
1740 * Normal priority then.
1741 * (Executed in no particular order.)
1742 */
1743 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1744 {
1745 /*
1746 * PDM Queues are pending.
1747 */
1748 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1749 PDMR3QueueFlushAll(pVM);
1750
1751 /*
1752 * PDM DMA transfers are pending.
1753 */
1754 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1755 PDMR3DmaRun(pVM);
1756
1757 /*
1758 * EMT Rendezvous (make sure they are handled before the requests).
1759 */
1760 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1761 {
1762 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1763 UPDATE_RC();
1764 /** @todo HACK ALERT! The following test is to make sure EM+TM
1765 * thinks the VM is stopped/reset before the next VM state change
1766 * is made. We need a better solution for this, or at least make it
1767 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1768 * VINF_EM_SUSPEND). */
1769 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1770 {
1771 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1772 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1773 return rc;
1774 }
1775 }
1776
1777 /*
1778 * Requests from other threads.
1779 */
1780 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1781 {
1782 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1783 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1784 {
1785 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1786 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1787 return rc2;
1788 }
1789 UPDATE_RC();
1790 /** @todo HACK ALERT! The following test is to make sure EM+TM
1791 * thinks the VM is stopped/reset before the next VM state change
1792 * is made. We need a better solution for this, or at least make it
1793 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1794 * VINF_EM_SUSPEND). */
1795 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1796 {
1797 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1798 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1799 return rc;
1800 }
1801 }
1802
1803#ifdef VBOX_WITH_REM
1804 /* Replay the handler notification changes. */
1805 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1806 {
1807 /* Try not to cause deadlocks. */
1808 if ( pVM->cCpus == 1
1809 || ( !PGMIsLockOwner(pVM)
1810 && !IOMIsLockWriteOwner(pVM))
1811 )
1812 {
1813 EMRemLock(pVM);
1814 REMR3ReplayHandlerNotifications(pVM);
1815 EMRemUnlock(pVM);
1816 }
1817 }
1818#endif
1819
1820 /* check that we got them all */
1821 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1822 }
1823
1824 /*
1825 * Normal priority then. (per-VCPU)
1826 * (Executed in no particular order.)
1827 */
1828 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1829 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1830 {
1831 /*
1832 * Requests from other threads.
1833 */
1834 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
1835 {
1836 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1837 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1838 {
1839 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1840 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1841 return rc2;
1842 }
1843 UPDATE_RC();
1844 /** @todo HACK ALERT! The following test is to make sure EM+TM
1845 * thinks the VM is stopped/reset before the next VM state change
1846 * is made. We need a better solution for this, or at least make it
1847 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1848 * VINF_EM_SUSPEND). */
1849 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1850 {
1851 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1852 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1853 return rc;
1854 }
1855 }
1856
1857 /* check that we got them all */
1858 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST)));
1859 }
1860
1861 /*
1862 * High priority pre execution chunk last.
1863 * (Executed in ascending priority order.)
1864 */
1865 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1866 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1867 {
1868 /*
1869 * Timers before interrupts.
1870 */
1871 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
1872 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1873 TMR3TimerQueuesDo(pVM);
1874
1875 /*
1876 * The instruction following an emulated STI should *always* be executed!
1877 *
1878 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
1879 * the eip is the same as the inhibited instr address. Before we
1880 * are able to execute this instruction in raw mode (iret to
1881 * guest code) an external interrupt might force a world switch
1882         * again, possibly allowing a guest interrupt to be dispatched
1883         * in the process. This could break the guest. Sounds very
1884         * unlikely, but such timing-sensitive problems are not as rare as
1885         * you might think.
1886 */
1887 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1888 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1889 {
1890 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1891 {
1892 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1893 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1894 }
1895 else
1896 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1897 }
1898
1899 /*
1900 * Interrupts.
1901 */
1902 bool fWakeupPending = false;
1903 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1904 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1905 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1906 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
1907#ifdef VBOX_WITH_RAW_MODE
1908 && PATMAreInterruptsEnabled(pVM)
1909#else
1910 && (pVCpu->em.s.pCtx->eflags.u32 & X86_EFL_IF)
1911#endif
1912 && !HMR3IsEventPending(pVCpu))
1913 {
1914 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1915 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1916 {
1917 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1918 /** @todo this really isn't nice, should properly handle this */
1919 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1920 if (pVM->em.s.fIemExecutesAll && (rc2 == VINF_EM_RESCHEDULE_REM || rc2 == VINF_EM_RESCHEDULE_HM || rc2 == VINF_EM_RESCHEDULE_RAW))
1921 rc2 = VINF_EM_RESCHEDULE;
1922#ifdef VBOX_STRICT
1923 rcIrq = rc2;
1924#endif
1925 UPDATE_RC();
1926 /* Reschedule required: We must not miss the wakeup below! */
1927 fWakeupPending = true;
1928 }
1929#ifdef VBOX_WITH_REM
1930 /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
1931 else if (REMR3QueryPendingInterrupt(pVM, pVCpu) != REM_NO_PENDING_IRQ)
1932 {
1933 Log2(("REMR3QueryPendingInterrupt -> %#x\n", REMR3QueryPendingInterrupt(pVM, pVCpu)));
1934 rc2 = VINF_EM_RESCHEDULE_REM;
1935 UPDATE_RC();
1936 }
1937#endif
1938 }
1939
1940 /*
1941 * Allocate handy pages.
1942 */
1943 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1944 {
1945 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1946 UPDATE_RC();
1947 }
1948
1949 /*
1950 * Debugger Facility request.
1951 */
1952 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_DBGF, VM_FF_PGM_NO_MEMORY))
1953 {
1954 rc2 = DBGFR3VMMForcedAction(pVM);
1955 UPDATE_RC();
1956 }
1957
1958 /*
1959 * EMT Rendezvous (must be serviced before termination).
1960 */
1961 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1962 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1963 {
1964 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1965 UPDATE_RC();
1966 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1967 * stopped/reset before the next VM state change is made. We need a better
1968 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1969 * && rc >= VINF_EM_SUSPEND). */
1970 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1971 {
1972 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1973 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1974 return rc;
1975 }
1976 }
1977
1978 /*
1979 * State change request (cleared by vmR3SetStateLocked).
1980 */
1981 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1982 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1983 {
1984 VMSTATE enmState = VMR3GetState(pVM);
1985 switch (enmState)
1986 {
1987 case VMSTATE_FATAL_ERROR:
1988 case VMSTATE_FATAL_ERROR_LS:
1989 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1990 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1991 return VINF_EM_SUSPEND;
1992
1993 case VMSTATE_DESTROYING:
1994 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1995 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1996 return VINF_EM_TERMINATE;
1997
1998 default:
1999 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2000 }
2001 }
2002
2003 /*
2004 * Out of memory? Since most of our fellow high priority actions may cause us
2005 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2006 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2007 * than us since we can terminate without allocating more memory.
2008 */
2009 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2010 {
2011 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2012 UPDATE_RC();
2013 if (rc == VINF_EM_NO_MEMORY)
2014 return rc;
2015 }
2016
2017 /*
2018 * If the virtual sync clock is still stopped, make TM restart it.
2019 */
2020 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2021 TMR3VirtualSyncFF(pVM, pVCpu);
2022
2023#ifdef DEBUG
2024 /*
2025 * Debug, pause the VM.
2026 */
2027 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2028 {
2029 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2030 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2031 return VINF_EM_SUSPEND;
2032 }
2033#endif
2034
2035 /* check that we got them all */
2036 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2037 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2038 }
2039
2040#undef UPDATE_RC
2041 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2042 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2043 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2044 return rc;
2045}
2046
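/*
 * The ordering inside emR3ForcedActions is deliberate: the normal
 * priority post-execution chunk (rendezvous, state changes, DBGF, reset,
 * CSAM, out-of-memory) runs first, then the normal priority VM and VCPU
 * chunks (queues, DMA, cross-thread requests, REM notifications), and
 * the high priority pre-execution chunk (timers, interrupt injection,
 * handy pages) runs last so it lands as close to guest execution as
 * possible.
 */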
2047
2048/**
2049 * Check if the preset execution time cap restricts guest execution scheduling.
2050 *
2051 * @returns true if allowed, false otherwise
2052 * @param pVM Pointer to the VM.
2053 * @param pVCpu Pointer to the VMCPU.
2054 */
2055bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2056{
2057 uint64_t u64UserTime, u64KernelTime;
2058
2059 if ( pVM->uCpuExecutionCap != 100
2060 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2061 {
2062 uint64_t u64TimeNow = RTTimeMilliTS();
2063 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2064 {
2065 /* New time slice. */
2066 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2067 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2068 pVCpu->em.s.u64TimeSliceExec = 0;
2069 }
2070 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2071
2072 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2073 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2074 return false;
2075 }
2076 return true;
2077}
2078
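/*
 * A worked example of the cap arithmetic above, assuming the
 * EM_TIME_SLICE in EMInternal.h is 100 ms: with uCpuExecutionCap = 50
 * the EMT may consume at most 100 * 50 / 100 = 50 ms of combined
 * kernel+user time per slice; once u64TimeSliceExec reaches that, the
 * execution loops sleep in 5 ms chunks (see the RTThreadSleep(5) in
 * emR3RemExecute) until a new slice begins.
 */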
2079
2080/**
2081 * Execute VM.
2082 *
2083 * This function is the main loop of the VM. The emulation thread
2084 * calls this function when the VM has been successfully constructed
2085 * and we're ready to execute the VM.
2086 *
2087 * Returning from this function means that the VM is turned off or
2088 * suspended (state already saved) and deconstruction is next in line.
2089 *
2090 * All interaction from other threads is done using forced actions
2091 * and signaling of the wait object.
2092 *
2093 * @returns VBox status code, informational status codes may indicate failure.
2094 * @param pVM Pointer to the VM.
2095 * @param pVCpu Pointer to the VMCPU.
2096 */
2097VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2098{
2099 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2100 pVM,
2101 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2102 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2103 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2104 pVCpu->em.s.fForceRAW));
2105 VM_ASSERT_EMT(pVM);
2106 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2107 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2108 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2109 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2110
2111 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2112 if (rc == 0)
2113 {
2114 /*
2115 * Start the virtual time.
2116 */
2117 TMR3NotifyResume(pVM, pVCpu);
2118
2119 /*
2120 * The Outer Main Loop.
2121 */
2122 bool fFFDone = false;
2123
2124 /* Reschedule right away to start in the right state. */
2125 rc = VINF_SUCCESS;
2126
2127        /* If resuming after a pause or a state load, restore the previous
2128           state (or else we'd start executing code); otherwise just reschedule. */
2129 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2130 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2131 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2132 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2133 else
2134 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2135 pVCpu->em.s.cIemThenRemInstructions = 0;
2136 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2137
2138 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2139 for (;;)
2140 {
2141 /*
2142 * Before we can schedule anything (we're here because
2143 * scheduling is required) we must service any pending
2144             * immediate rescheduling upon entering an inner loop.
2145 * immediate rescheduling upon entering an inner loop
2146 *
2147 * Do forced actions.
2148 */
2149 if ( !fFFDone
2150 && RT_SUCCESS(rc)
2151 && rc != VINF_EM_TERMINATE
2152 && rc != VINF_EM_OFF
2153 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2154 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK)))
2155 {
2156 rc = emR3ForcedActions(pVM, pVCpu, rc);
2157 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2158 if ( ( rc == VINF_EM_RESCHEDULE_REM
2159 || rc == VINF_EM_RESCHEDULE_HM)
2160 && pVCpu->em.s.fForceRAW)
2161 rc = VINF_EM_RESCHEDULE_RAW;
2162 }
2163 else if (fFFDone)
2164 fFFDone = false;
2165
2166 /*
2167 * Now what to do?
2168 */
2169 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2170 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2171 switch (rc)
2172 {
2173 /*
2174 * Keep doing what we're currently doing.
2175 */
2176 case VINF_SUCCESS:
2177 break;
2178
2179 /*
2180 * Reschedule - to raw-mode execution.
2181 */
2182 case VINF_EM_RESCHEDULE_RAW:
2183 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2184 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2185 pVCpu->em.s.enmState = EMSTATE_RAW;
2186 break;
2187
2188 /*
2189 * Reschedule - to hardware accelerated raw-mode execution.
2190 */
2191 case VINF_EM_RESCHEDULE_HM:
2192 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2193 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2194 Assert(!pVCpu->em.s.fForceRAW);
2195 pVCpu->em.s.enmState = EMSTATE_HM;
2196 break;
2197
2198 /*
2199 * Reschedule - to recompiled execution.
2200 */
2201 case VINF_EM_RESCHEDULE_REM:
2202#ifdef VBOX_WITH_FIRST_IEM_STEP
2203 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2204 if (HMIsEnabled(pVM))
2205 {
2206 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2207 enmOldState, EMSTATE_IEM_THEN_REM));
2208 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2209 {
2210 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2211 pVCpu->em.s.cIemThenRemInstructions = 0;
2212 }
2213 }
2214 else
2215 {
2216 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2217 pVCpu->em.s.enmState = EMSTATE_REM;
2218 }
2219#else
2220 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2221 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2222 pVCpu->em.s.enmState = EMSTATE_REM;
2223#endif
2224 break;
2225
2226 /*
2227 * Resume.
2228 */
2229 case VINF_EM_RESUME:
2230 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2231 /* Don't reschedule in the halted or wait for SIPI case. */
2232 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2233 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2234 {
2235 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2236 break;
2237 }
2238 /* fall through and get scheduled. */
2239
2240 /*
2241 * Reschedule.
2242 */
2243 case VINF_EM_RESCHEDULE:
2244 {
2245 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2246 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2247 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2248 pVCpu->em.s.cIemThenRemInstructions = 0;
2249 pVCpu->em.s.enmState = enmState;
2250 break;
2251 }
2252
2253 /*
2254 * Halted.
2255 */
2256 case VINF_EM_HALT:
2257 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2258 pVCpu->em.s.enmState = EMSTATE_HALTED;
2259 break;
2260
2261 /*
2262 * Switch to the wait for SIPI state (application processor only)
2263 */
2264 case VINF_EM_WAIT_SIPI:
2265 Assert(pVCpu->idCpu != 0);
2266 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2267 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2268 break;
2269
2270
2271 /*
2272 * Suspend.
2273 */
2274 case VINF_EM_SUSPEND:
2275 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2276 Assert(enmOldState != EMSTATE_SUSPENDED);
2277 pVCpu->em.s.enmPrevState = enmOldState;
2278 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2279 break;
2280
2281 /*
2282 * Reset.
2283 * We might end up doing a double reset for now, we'll have to clean up the mess later.
2284 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2285 case VINF_EM_RESET:
2286 {
2287 if (pVCpu->idCpu == 0)
2288 {
2289 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2290 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2291 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2292 pVCpu->em.s.cIemThenRemInstructions = 0;
2293 pVCpu->em.s.enmState = enmState;
2294 }
2295 else
2296 {
2297 /* All other VCPUs go into the wait for SIPI state. */
2298 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2299 }
2300 break;
2301 }
2302
2303 /*
2304 * Power Off.
2305 */
2306 case VINF_EM_OFF:
2307 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2308 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2309 TMR3NotifySuspend(pVM, pVCpu);
2310 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2311 return rc;
2312
2313 /*
2314 * Terminate the VM.
2315 */
2316 case VINF_EM_TERMINATE:
2317 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2318 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2319 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2320 TMR3NotifySuspend(pVM, pVCpu);
2321 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2322 return rc;
2323
2324
2325 /*
2326 * Out of memory, suspend the VM and stuff.
2327 */
2328 case VINF_EM_NO_MEMORY:
2329 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2330 Assert(enmOldState != EMSTATE_SUSPENDED);
2331 pVCpu->em.s.enmPrevState = enmOldState;
2332 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2333 TMR3NotifySuspend(pVM, pVCpu);
2334 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2335
2336 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2337 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2338 if (rc != VINF_EM_SUSPEND)
2339 {
2340 if (RT_SUCCESS_NP(rc))
2341 {
2342 AssertLogRelMsgFailed(("%Rrc\n", rc));
2343 rc = VERR_EM_INTERNAL_ERROR;
2344 }
2345 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2346 }
2347 return rc;
2348
2349 /*
2350 * Guest debug events.
2351 */
2352 case VINF_EM_DBG_STEPPED:
2353 case VINF_EM_DBG_STOP:
2354 case VINF_EM_DBG_BREAKPOINT:
2355 case VINF_EM_DBG_STEP:
2356 if (enmOldState == EMSTATE_RAW)
2357 {
2358 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2359 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2360 }
2361 else if (enmOldState == EMSTATE_HM)
2362 {
2363 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2364 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2365 }
2366 else if (enmOldState == EMSTATE_REM)
2367 {
2368 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2369 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2370 }
2371 else
2372 {
2373 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2374 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2375 }
2376 break;
2377
2378 /*
2379 * Hypervisor debug events.
2380 */
2381 case VINF_EM_DBG_HYPER_STEPPED:
2382 case VINF_EM_DBG_HYPER_BREAKPOINT:
2383 case VINF_EM_DBG_HYPER_ASSERTION:
2384 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2385 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2386 break;
2387
2388 /*
2389 * Triple fault.
2390 */
2391 case VINF_EM_TRIPLE_FAULT:
2392 if (!pVM->em.s.fGuruOnTripleFault)
2393 {
2394 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2395 Assert(pVM->cCpus == 1);
#ifdef VBOX_WITH_REM
2396                        REMR3Reset(pVM);
#endif
2397 PGMR3ResetCpu(pVM, pVCpu);
2398 TRPMR3ResetCpu(pVCpu);
2399 CPUMR3ResetCpu(pVM, pVCpu);
2400 EMR3ResetCpu(pVCpu);
2401 HMR3ResetCpu(pVCpu);
2402 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2403                        Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d\n", enmOldState, pVCpu->em.s.enmState));
2404 break;
2405 }
2406 /* Else fall through and trigger a guru. */
2407 case VERR_VMM_RING0_ASSERTION:
2408 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2409 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2410 break;
2411
2412 /*
2413 * Any error code showing up here other than the ones we
2414 * know and process above is considered to be FATAL.
2415 *
2416 * Unknown warnings and informational status codes are also
2417 * included in this.
2418 */
2419 default:
2420 if (RT_SUCCESS_NP(rc))
2421 {
2422 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2423 rc = VERR_EM_INTERNAL_ERROR;
2424 }
2425 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2426 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2427 break;
2428 }
2429
2430 /*
2431 * Act on state transition.
2432 */
2433 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2434 if (enmOldState != enmNewState)
2435 {
2436 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2437
2438 /* Clear MWait flags. */
2439 if ( enmOldState == EMSTATE_HALTED
2440 && (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2441 && ( enmNewState == EMSTATE_RAW
2442 || enmNewState == EMSTATE_HM
2443 || enmNewState == EMSTATE_REM
2444 || enmNewState == EMSTATE_IEM_THEN_REM
2445 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2446 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2447 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2448 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2449 {
2450 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2451 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2452 }
2453 }
2454 else
2455 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2456
2457 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2458 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2459
2460 /*
2461 * Act on the new state.
2462 */
2463 switch (enmNewState)
2464 {
2465 /*
2466 * Execute raw.
2467 */
2468 case EMSTATE_RAW:
2469#ifdef VBOX_WITH_RAW_MODE
2470 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2471#else
2472 AssertLogRelMsgFailed(("%Rrc\n", rc));
2473 rc = VERR_EM_INTERNAL_ERROR;
2474#endif
2475 break;
2476
2477 /*
2478 * Execute hardware accelerated raw.
2479 */
2480 case EMSTATE_HM:
2481 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2482 break;
2483
2484 /*
2485 * Execute recompiled.
2486 */
2487 case EMSTATE_REM:
2488 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2489 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2490 break;
2491
2492 /*
2493 * Execute in the interpreter.
2494 */
2495 case EMSTATE_IEM:
2496 {
2497#if 0 /* For testing purposes. */
2498 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2499 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2500 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2501 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2502 rc = VINF_SUCCESS;
2503 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2504#endif
2505 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu));
2506 if (pVM->em.s.fIemExecutesAll)
2507 {
2508 Assert(rc != VINF_EM_RESCHEDULE_REM);
2509 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2510 Assert(rc != VINF_EM_RESCHEDULE_HM);
2511 }
2512 fFFDone = false;
2513 break;
2514 }
2515
2516 /*
2517                 * Execute in IEM, hoping we can quickly switch back to HM
2518 * or RAW execution. If our hopes fail, we go to REM.
2519 */
2520 case EMSTATE_IEM_THEN_REM:
2521 {
2522 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2523 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2524 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2525 break;
2526 }
2527
2528 /*
2529 * Application processor execution halted until SIPI.
2530 */
2531 case EMSTATE_WAIT_SIPI:
2532 /* no break */
2533 /*
2534 * hlt - execution halted until interrupt.
2535 */
2536 case EMSTATE_HALTED:
2537 {
2538 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2539                    /* If HM (or someone else) stores a pending interrupt in
2540                       TRPM, it must be dispatched ASAP without any halting.
2541                       Anything pending in TRPM has been accepted and the CPU
2542                       should already be in the right state to receive it. */
2543 if (TRPMHasTrap(pVCpu))
2544 rc = VINF_EM_RESCHEDULE;
2545 /* MWAIT has a special extension where it's woken up when
2546 an interrupt is pending even when IF=0. */
2547 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2548 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2549 {
2550 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2551 if ( rc == VINF_SUCCESS
2552 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2553 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
2554 {
2555 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2556 rc = VINF_EM_RESCHEDULE;
2557 }
2558 }
2559 else
2560 {
2561 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2562 if ( rc == VINF_SUCCESS
2563 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI))
2564 {
2565 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI after HLT\n"));
2566 rc = VINF_EM_RESCHEDULE;
2567 }
2568 }
2569
2570 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2571 break;
2572 }
2573
2574 /*
2575 * Suspended - return to VM.cpp.
2576 */
2577 case EMSTATE_SUSPENDED:
2578 TMR3NotifySuspend(pVM, pVCpu);
2579 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2580 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2581 return VINF_EM_SUSPEND;
2582
2583 /*
2584 * Debugging in the guest.
2585 */
2586 case EMSTATE_DEBUG_GUEST_RAW:
2587 case EMSTATE_DEBUG_GUEST_HM:
2588 case EMSTATE_DEBUG_GUEST_IEM:
2589 case EMSTATE_DEBUG_GUEST_REM:
2590 TMR3NotifySuspend(pVM, pVCpu);
2591 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2592 TMR3NotifyResume(pVM, pVCpu);
2593 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2594 break;
2595
2596 /*
2597 * Debugging in the hypervisor.
2598 */
2599 case EMSTATE_DEBUG_HYPER:
2600 {
2601 TMR3NotifySuspend(pVM, pVCpu);
2602 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2603
2604 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2605 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2606 if (rc != VINF_SUCCESS)
2607 {
2608 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2609 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2610 else
2611 {
2612 /* switch to guru meditation mode */
2613 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2614 VMMR3FatalDump(pVM, pVCpu, rc);
2615 }
2616 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2617 return rc;
2618 }
2619
2620 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2621 TMR3NotifyResume(pVM, pVCpu);
2622 break;
2623 }
2624
2625 /*
2626 * Guru meditation takes place in the debugger.
2627 */
2628 case EMSTATE_GURU_MEDITATION:
2629 {
2630 TMR3NotifySuspend(pVM, pVCpu);
2631 VMMR3FatalDump(pVM, pVCpu, rc);
2632 emR3Debug(pVM, pVCpu, rc);
2633 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2634 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2635 return rc;
2636 }
2637
2638 /*
2639 * The states we don't expect here.
2640 */
2641 case EMSTATE_NONE:
2642 case EMSTATE_TERMINATING:
2643 default:
2644 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2645 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2646 TMR3NotifySuspend(pVM, pVCpu);
2647 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2648 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2649 return VERR_EM_INTERNAL_ERROR;
2650 }
2651 } /* The Outer Main Loop */
2652 }
2653 else
2654 {
2655 /*
2656 * Fatal error.
2657 */
2658 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2659 TMR3NotifySuspend(pVM, pVCpu);
2660 VMMR3FatalDump(pVM, pVCpu, rc);
2661 emR3Debug(pVM, pVCpu, rc);
2662 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2663 /** @todo change the VM state! */
2664 return rc;
2665 }
2666
2667 /* (won't ever get here). */
2668 AssertFailed();
2669}
2670
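/*
 * Note on the setjmp at the top of EMR3ExecuteVM: it pairs with longjmps
 * taken on fatal internal errors (via pVCpu->em.s.u.FatalLongJump), which
 * land in the fatal-dump-and-debug path of the final else branch instead
 * of unwinding through the state machine.
 */
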
2671/**
2672 * Notify EM of a state change (used by FTM to suspend the VM).
2673 *
2674 * @param pVM Pointer to the VM.
2675 */
2676VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
2677{
2678 PVMCPU pVCpu = VMMGetCpu(pVM);
2679
2680 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
2681 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
2682 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2683 return VINF_SUCCESS;
2684}
2685
2686/**
2687 * Notify EM of a state change (used by FTM to resume the VM).
2688 *
2689 * @param pVM Pointer to the VM.
2690 */
2691VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
2692{
2693 PVMCPU pVCpu = VMMGetCpu(pVM);
2694 EMSTATE enmCurState = pVCpu->em.s.enmState;
2695
2696 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
2697 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2698 pVCpu->em.s.enmPrevState = enmCurState;
2699 return VINF_SUCCESS;
2700}
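
/*
 * Typical pairing as used by FTM (a sketch, not lifted from FTM.cpp):
 *
 *      EMR3NotifySuspend(pVM);   // park EM, stop the virtual clocks
 *      // ... synchronize the VM state with the standby host ...
 *      EMR3NotifyResume(pVM);    // restore enmPrevState, resume clocks
 */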