/* $Id: EM.cpp 55036 2015-03-31 14:03:50Z vboxsync $ */
/** @file
 * EM - Execution Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_em EM - The Execution Monitor / Manager
 *
 * The Execution Monitor/Manager is responsible for running the VM, scheduling
 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
 * Interpreted), and keeping the CPU states in sync. The function
 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
 * emR3RemExecute).
 *
 * The interpreted execution is only used to avoid switching between
 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
 * The interpretation is thus implemented as part of EM.
 *
 * @see grp_em
 */
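
/* Illustrative sketch (not part of the original source; see EMR3ExecuteVM()
 * for the real thing): the outer loop dispatches to the per-mode inner loops
 * named above roughly like this:
 *
 * @code
 *     for (;;)
 *     {
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_RAW: rc = emR3RawExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_HM:  rc = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
 *             case EMSTATE_REM: rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             // ... halted, suspended, debug and guru states ...
 *         }
 *         // Process rc and pending forced actions, then pick the next state.
 *     }
 * @endcode
 */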

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#include <VBox/vmm/em.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmqueue.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/patm.h>
#include "EMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include "VMMTracing.h"

#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/stream.h>
#include <iprt/thread.h>


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
#define EM_NOTIFY_HM
#endif


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
static const char *emR3GetStateName(EMSTATE enmState);
#endif
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);


/**
 * Initializes the EM.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 */
VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
{
    LogFlow(("EMR3Init\n"));
    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, em.s, 32);
    AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
    AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));

    /*
     * Init the structure.
     */
    pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
    PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
    PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");

    bool fEnabled;
    int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileUser = !fEnabled;

    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileSupervisor = !fEnabled;

#ifdef VBOX_WITH_RAW_RING1
    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
    AssertLogRelRCReturn(rc, rc);
#else
    pVM->fRawRing1Enabled = false;      /* Disabled by default. */
#endif

    rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
    AssertLogRelRCReturn(rc, rc);

    rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
    AssertLogRelRCReturn(rc, rc);
    pVM->em.s.fGuruOnTripleFault = !fEnabled;
    if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
    {
        LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
        pVM->em.s.fGuruOnTripleFault = true;
    }

    Log(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
         pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
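
    /* Note (illustrative, not in the original source): the CFGM keys queried
     * above can be set from the host side via the "VBoxInternal" extradata
     * prefix, which is mapped onto the CFGM tree, e.g.:
     *     VBoxManage setextradata <vmname> "VBoxInternal/EM/IemExecutesAll" 1
     */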

#ifdef VBOX_WITH_REM
    /*
     * Initialize the REM critical section.
     */
    AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
    rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
    AssertRCReturn(rc, rc);
#endif

    /*
     * Saved state.
     */
    rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
                               NULL, NULL, NULL,
                               NULL, emR3Save, NULL,
                               NULL, emR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->em.s.enmState     = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
        pVCpu->em.s.enmPrevState = EMSTATE_NONE;
        pVCpu->em.s.fForceRAW    = false;

        pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
#ifdef VBOX_WITH_RAW_MODE
        if (!HMIsEnabled(pVM))
        {
            pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
            AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
        }
#endif

        /* Force reset of the time slice. */
        pVCpu->em.s.u64TimeSliceStart = 0;

# define EM_REG_COUNTER(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_COUNTER_USED(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE_ADV(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

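        /* Illustrative note (not in the original source): each helper macro
         * above expands to a STAMR3RegisterF() call followed by an AssertRC().
         * The first EM_REG_PROFILE() use below, for instance, expands roughly to:
         *
         * @code
         *     rc = STAMR3RegisterF(pVM, &pStats->StatRZEmulate, STAMTYPE_PROFILE,
         *                          STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL,
         *                          "Profiling of EMInterpretInstruction.",
         *                          "/EM/CPU%d/RZ/Interpret", i);
         *     AssertRC(rc);
         * @endcode
         */
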
        /*
         * Statistics.
         */
#ifdef VBOX_WITH_STATISTICS
        PEMSTATS pStats;
        rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->em.s.pStatsR3 = pStats;
        pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
        pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);

        EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
        EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");

        EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
        EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");

        EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
        EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of prefix.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of prefix.");

        EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
# ifdef VBOX_WITH_FIRST_IEM_STEP
        EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
# else
        EM_REG_COUNTER_USED(&pStats->StatIn, "/EM/CPU%d/R3/PrivInst/In", "Number of in instructions.");
        EM_REG_COUNTER_USED(&pStats->StatOut, "/EM/CPU%d/R3/PrivInst/Out", "Number of out instructions.");
# endif
        EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
        EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
        EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");

        EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
        pVCpu->em.s.pCliStatTree = 0;

        /* these should be considered for release statistics. */
        EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
        EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHmEntry, "/PROF/CPU%d/EM/HmEnter", "Profiling Hardware Accelerated Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHmExec, "/PROF/CPU%d/EM/HmExec", "Profiling Hardware Accelerated Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");

#endif /* VBOX_WITH_STATISTICS */

        EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
        EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
        EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");

        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
    }

    emR3InitDbg(pVM);
    return VINF_SUCCESS;
}


/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM     Pointer to the VM.
 */
VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
{
    LogFlow(("EMR3Relocate\n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        if (pVCpu->em.s.pStatsR3)
            pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
    }
}


/**
 * Reset the EM state for a CPU.
 *
 * Called by EMR3Reset and hot plugging.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 */
VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
{
    pVCpu->em.s.fForceRAW = false;

    /* VMR3Reset may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
       out of the HALTED state here so that enmPrevState doesn't end up as
       HALTED when EMR3Execute returns. */
    if (pVCpu->em.s.enmState == EMSTATE_HALTED)
    {
        Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
        pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
    }
}


/**
 * Reset notification.
 *
 * @param   pVM     Pointer to the VM.
 */
VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
{
    Log(("EMR3Reset: \n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        EMR3ResetCpu(&pVM->aCpus[i]);
}


/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources; the VM
 * itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 */
VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
{
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

#ifdef VBOX_WITH_REM
    PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
#endif
    return VINF_SUCCESS;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     Pointer to the VM.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
        AssertRCReturn(rc, rc);

        Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
        Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
        AssertRCReturn(rc, rc);

        /* Save mwait state. */
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    /*
     * Validate version.
     */
    if (   uVersion > EM_SAVED_STATE_VERSION
        || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
    {
        AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Load the saved state.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;
        AssertRCReturn(rc, rc);

        if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
        {
            AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
            rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
            AssertRCReturn(rc, rc);
            Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);

            pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
        }
        if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
        {
            /* Load mwait state. */
            rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
            AssertRCReturn(rc, rc);
        }

        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return VINF_SUCCESS;
}
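
/* Illustrative summary (not in the original source): from the SSMR3Put/Get
 * calls in emR3Save() and emR3Load() above, the per-VCPU saved-state layout is:
 *
 *     bool      fForceRAW
 *     uint32_t  enmPrevState       - only when uVersion > EM_SAVED_STATE_VERSION_PRE_SMP
 *     uint32_t  MWait.fWait        - this and the following fields only when
 *     RTGCPTR   MWait.uMWaitRAX      uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT
 *     RTGCPTR   MWait.uMWaitRCX
 *     RTGCPTR   MWait.uMonitorRAX
 *     RTGCPTR   MWait.uMonitorRCX
 *     RTGCPTR   MWait.uMonitorRDX
 */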


/**
 * Argument packet for emR3SetExecutionPolicy.
 */
struct EMR3SETEXECPOLICYARGS
{
    EMEXECPOLICY enmPolicy;
    bool         fEnforce;
};


/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
 */
static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /*
     * Only the first CPU changes the variables.
     */
    if (pVCpu->idCpu == 0)
    {
        struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
        switch (pArgs->enmPolicy)
        {
            case EMEXECPOLICY_RECOMPILE_RING0:
                pVM->fRecompileSupervisor = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_RECOMPILE_RING3:
                pVM->fRecompileUser = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_IEM_ALL:
                pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
                break;
            default:
                AssertFailedReturn(VERR_INVALID_PARAMETER);
        }
        Log(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
             pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
    }

    /*
     * Force rescheduling if in RAW, HM, IEM, REM, or IEM-then-REM.
     */
    return pVCpu->em.s.enmState == EMSTATE_RAW
        || pVCpu->em.s.enmState == EMSTATE_HM
        || pVCpu->em.s.enmState == EMSTATE_IEM
        || pVCpu->em.s.enmState == EMSTATE_REM
        || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
         ? VINF_EM_RESCHEDULE
         : VINF_SUCCESS;
}


/**
 * Changes an execution scheduling policy parameter.
 *
 * This is used to enable or disable raw-mode / hardware-virtualization
 * execution of user and supervisor code.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
 *
 * @param   pUVM            The user mode VM handle.
 * @param   enmPolicy       The scheduling policy to change.
 * @param   fEnforce        Whether to enforce the policy or not.
 */
VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);

    struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
    return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
}


/**
 * Queries an execution scheduling policy parameter.
 *
 * @returns VBox status code
 * @param   pUVM            The user mode VM handle.
 * @param   enmPolicy       The scheduling policy to query.
 * @param   pfEnforced      Where to return the current value.
 */
VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
{
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /* No need to bother EMTs with a query. */
    switch (enmPolicy)
    {
        case EMEXECPOLICY_RECOMPILE_RING0:
            *pfEnforced = pVM->fRecompileSupervisor;
            break;
        case EMEXECPOLICY_RECOMPILE_RING3:
            *pfEnforced = pVM->fRecompileUser;
            break;
        case EMEXECPOLICY_IEM_ALL:
            *pfEnforced = pVM->em.s.fIemExecutesAll;
            break;
        default:
            AssertFailedReturn(VERR_INTERNAL_ERROR_2);
    }

    return VINF_SUCCESS;
}
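
/* Illustrative usage (not in the original source): a ring-3 client holding a
 * valid user-mode VM handle could force all execution through IEM and read
 * the setting back like this:
 *
 * @code
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *     if (RT_SUCCESS(rc))
 *     {
 *         bool fEnforced = false;
 *         rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fEnforced);
 *         Assert(RT_SUCCESS(rc) && fEnforced);
 *     }
 * @endcode
 */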


/**
 * Raise a fatal error.
 *
 * Safely terminates the VM with a full state report. This function will
 * naturally never return.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   rc          VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
    AssertReleaseMsgFailed(("longjmp returned!\n"));
}


#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
/**
 * Gets the EM state name.
 *
 * @returns Pointer to a read-only state name.
 * @param   enmState    The state.
 */
static const char *emR3GetStateName(EMSTATE enmState)
{
    switch (enmState)
    {
        case EMSTATE_NONE:              return "EMSTATE_NONE";
        case EMSTATE_RAW:               return "EMSTATE_RAW";
        case EMSTATE_HM:                return "EMSTATE_HM";
        case EMSTATE_IEM:               return "EMSTATE_IEM";
        case EMSTATE_REM:               return "EMSTATE_REM";
        case EMSTATE_HALTED:            return "EMSTATE_HALTED";
        case EMSTATE_WAIT_SIPI:         return "EMSTATE_WAIT_SIPI";
        case EMSTATE_SUSPENDED:         return "EMSTATE_SUSPENDED";
        case EMSTATE_TERMINATING:       return "EMSTATE_TERMINATING";
        case EMSTATE_DEBUG_GUEST_RAW:   return "EMSTATE_DEBUG_GUEST_RAW";
        case EMSTATE_DEBUG_GUEST_HM:    return "EMSTATE_DEBUG_GUEST_HM";
        case EMSTATE_DEBUG_GUEST_IEM:   return "EMSTATE_DEBUG_GUEST_IEM";
        case EMSTATE_DEBUG_GUEST_REM:   return "EMSTATE_DEBUG_GUEST_REM";
        case EMSTATE_DEBUG_HYPER:       return "EMSTATE_DEBUG_HYPER";
        case EMSTATE_GURU_MEDITATION:   return "EMSTATE_GURU_MEDITATION";
        case EMSTATE_IEM_THEN_REM:      return "EMSTATE_IEM_THEN_REM";
        default:                        return "Unknown!";
    }
}
#endif /* LOG_ENABLED || VBOX_STRICT */


/**
 * Debug loop.
 *
 * @returns VBox status code for EM.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   rc      Current EM VBox status code.
 */
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
        const VBOXSTRICTRC rcLast = rc;

        /*
         * Debug related RC.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (   pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    || pVCpu->em.s.fForceRAW /* paranoia */)
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawStep(pVM, pVCpu);
#else
                    AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
#endif
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
                    rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
#ifdef VBOX_WITH_REM
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
                    rc = emR3RemStep(pVM, pVCpu);
#endif
                else
                {
                    rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
                    if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
                        rc = VINF_EM_DBG_STEPPED;
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                RTLogFlush(NULL);
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special error codes! */
            {
                LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
            }
        }

        /*
         * Process the result.
         */
        do
        {
            switch (VBOXSTRICTRC_VAL(rc))
            {
                /*
                 * Continue the debugging loop.
                 */
                case VINF_EM_DBG_STEP:
                case VINF_EM_DBG_STOP:
                case VINF_EM_DBG_STEPPED:
                case VINF_EM_DBG_BREAKPOINT:
                case VINF_EM_DBG_HYPER_STEPPED:
                case VINF_EM_DBG_HYPER_BREAKPOINT:
                case VINF_EM_DBG_HYPER_ASSERTION:
                    break;

                /*
                 * Resuming execution (in some form) has to be done here if we got
                 * a hypervisor debug event.
                 */
                case VINF_SUCCESS:
                case VINF_EM_RESUME:
                case VINF_EM_SUSPEND:
                case VINF_EM_RESCHEDULE:
                case VINF_EM_RESCHEDULE_RAW:
                case VINF_EM_RESCHEDULE_REM:
                case VINF_EM_HALT:
                    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                    {
#ifdef VBOX_WITH_RAW_MODE
                        rc = emR3RawResumeHyper(pVM, pVCpu);
                        if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                            continue;
#else
                        AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
#endif
                    }
                    if (rc == VINF_SUCCESS)
                        rc = VINF_EM_RESCHEDULE;
                    return rc;

                /*
                 * The debugger isn't attached.
                 * We'll simply turn the thing off since that's the easiest thing to do.
                 */
                case VERR_DBGF_NOT_ATTACHED:
                    switch (VBOXSTRICTRC_VAL(rcLast))
                    {
                        case VINF_EM_DBG_HYPER_STEPPED:
                        case VINF_EM_DBG_HYPER_BREAKPOINT:
                        case VINF_EM_DBG_HYPER_ASSERTION:
                        case VERR_TRPM_PANIC:
                        case VERR_TRPM_DONT_PANIC:
                        case VERR_VMM_RING0_ASSERTION:
                        case VERR_VMM_HYPER_CR3_MISMATCH:
                        case VERR_VMM_RING3_CALL_DISABLED:
                            return rcLast;
                    }
                    return VINF_EM_OFF;

                /*
                 * Status codes terminating the VM in one or another sense.
                 */
                case VINF_EM_TERMINATE:
                case VINF_EM_OFF:
                case VINF_EM_RESET:
                case VINF_EM_NO_MEMORY:
                case VINF_EM_RAW_STALE_SELECTOR:
                case VINF_EM_RAW_IRET_TRAP:
                case VERR_TRPM_PANIC:
                case VERR_TRPM_DONT_PANIC:
                case VERR_IEM_INSTR_NOT_IMPLEMENTED:
                case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
                case VERR_VMM_RING0_ASSERTION:
                case VERR_VMM_HYPER_CR3_MISMATCH:
                case VERR_VMM_RING3_CALL_DISABLED:
                case VERR_INTERNAL_ERROR:
                case VERR_INTERNAL_ERROR_2:
                case VERR_INTERNAL_ERROR_3:
                case VERR_INTERNAL_ERROR_4:
                case VERR_INTERNAL_ERROR_5:
                case VERR_IPE_UNEXPECTED_STATUS:
                case VERR_IPE_UNEXPECTED_INFO_STATUS:
                case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                    return rc;

                /*
                 * The rest is unexpected, and will keep us here.
                 */
                default:
                    AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
                    break;
            }
        } while (false);
    } /* debug for ever */
}


/**
 * Steps recompiled code.
 *
 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
 *          VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
    Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

#ifdef VBOX_WITH_REM
    EMRemLock(pVM);

    /*
     * Switch to REM, step instruction, switch back.
     */
    int rc = REMR3State(pVM, pVCpu);
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);
        REMR3StateBack(pVM, pVCpu);
    }
    EMRemUnlock(pVM);

#else
    int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
#endif

    Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
    return rc;
}


/**
 * emR3RemExecute helper that syncs the state back from REM and leaves the
 * REM critical section.
 *
 * @returns false - new fInREMState value.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
{
#ifdef VBOX_WITH_REM
    STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
    REMR3StateBack(pVM, pVCpu);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);

    EMRemUnlock(pVM);
#endif
    return false;
}


/**
 * Executes recompiled code.
 *
 * This function contains the recompiler version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
 *          VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 *
 */
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
{
#ifdef LOG_ENABLED
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    uint32_t cpl = CPUMGetGuestCPL(pVCpu);

    if (pCtx->eflags.Bits.u1VM)
        Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
    else
        Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
#endif
    STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);

#if defined(VBOX_STRICT) && defined(DEBUG_bird)
    AssertMsg(   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
              || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)),  /** @todo @bugref{1419} - get flat address. */
              ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
#endif

    /*
     * Spin until we get a forced action which returns anything but VINF_SUCCESS,
     * or the REM suggests raw-mode execution.
     */
    *pfFFDone = false;
#ifdef VBOX_WITH_REM
    bool fInREMState = false;
#endif
    int rc = VINF_SUCCESS;
    for (;;)
    {
#ifdef VBOX_WITH_REM
        /*
         * Lock REM and update the state if not already in sync.
         *
         * Note! Big lock, but you are not supposed to own any lock when
         * coming in here.
         */
        if (!fInREMState)
        {
            EMRemLock(pVM);
            STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);

            /* Flush the recompiler translation blocks if the VCPU has changed,
               also force a full CPU state resync. */
            if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
            {
                REMFlushTBs(pVM);
                CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
            }
            pVM->em.s.idLastRemCpu = pVCpu->idCpu;

            rc = REMR3State(pVM, pVCpu);

            STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
            if (RT_FAILURE(rc))
                break;
            fInREMState = true;

            /*
             * We might have missed the raising of VMREQ, TIMER and some other
             * important FFs while we were busy switching the state. So, check again.
             */
            if (   VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
                || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
            {
                LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
                goto l_REMDoForcedActions;
            }
        }
#endif

        /*
         * Execute REM.
         */
        if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
        {
            STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
#ifdef VBOX_WITH_REM
            rc = REMR3Run(pVM, pVCpu);
#else
            rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu));
#endif
            STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
        }
        else
        {
            /* Give up this time slice; virtual time continues */
            STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
            RTThreadSleep(5);
            STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
            rc = VINF_SUCCESS;
        }

        /*
         * Deal with high priority post execution FFs before doing anything
         * else. Sync back the state and leave the lock to be on the safe side.
         */
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
        {
#ifdef VBOX_WITH_REM
            fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
#endif
            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
        }

        /*
         * Process the returned status code.
         */
        if (rc != VINF_SUCCESS)
        {
            if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
                break;
            if (rc != VINF_REM_INTERRUPED_FF)
            {
                /*
                 * Anything which is not known to us means an internal error
                 * and the termination of the VM!
                 */
                AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
                break;
            }
        }


1186 /*
1187 * Check and execute forced actions.
1188 *
1189 * Sync back the VM state and leave the lock before calling any of
1190 * these; you never know what's going to happen here.
1191 */
1192#ifdef VBOX_HIGH_RES_TIMERS_HACK
1193 TMTimerPollVoid(pVM, pVCpu);
1194#endif
1195 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1196 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1197 || VMCPU_FF_IS_PENDING(pVCpu,
1198 VMCPU_FF_ALL_REM_MASK
1199 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1200 {
1201l_REMDoForcedActions:
1202#ifdef VBOX_WITH_REM
1203 if (fInREMState)
1204 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1205#endif
1206 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1207 rc = emR3ForcedActions(pVM, pVCpu, rc);
1208 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1209 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1210 if ( rc != VINF_SUCCESS
1211 && rc != VINF_EM_RESCHEDULE_REM)
1212 {
1213 *pfFFDone = true;
1214 break;
1215 }
1216 }
1217
1218 } /* The Inner Loop, recompiled execution mode version. */
1219
1220
1221#ifdef VBOX_WITH_REM
1222 /*
1223 * Returning. Sync back the VM state if required.
1224 */
1225 if (fInREMState)
1226 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1227#endif
1228
1229 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1230 return rc;
1231}
1232
1233
1234#ifdef DEBUG
1235
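/**
 * Single-steps cIterations instructions in the recompiler, logging and
 * disassembling each one (debug builds only).
 *
 * @returns VINF_EM_RESCHEDULE.
 * @param pVM Pointer to the VM.
 * @param pVCpu Pointer to the VMCPU.
 * @param cIterations Maximum number of instructions to step.
 */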
1236int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1237{
1238 EMSTATE enmOldState = pVCpu->em.s.enmState;
1239
1240 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1241
1242 Log(("Single step BEGIN:\n"));
1243 for (uint32_t i = 0; i < cIterations; i++)
1244 {
1245 DBGFR3PrgStep(pVCpu);
1246 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1247 emR3RemStep(pVM, pVCpu);
1248 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1249 break;
1250 }
1251 Log(("Single step END:\n"));
1252 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1253 pVCpu->em.s.enmState = enmOldState;
1254 return VINF_EM_RESCHEDULE;
1255}
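
/* Usage sketch for the DEBUG-only helper above (a hypothetical call site;
   in practice it is driven ad hoc from debugging code on the EMT):
       rc = emR3SingleStepExecRem(pVM, pVCpu, 32); // trace 32 single steps
   It always returns VINF_EM_RESCHEDULE so the outer loop re-evaluates the
   execution state afterwards. */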
1256
1257#endif /* DEBUG */
1258
1259
1260/**
1261 * Try to execute the problematic code in IEM first, then fall back to REM if there
1262 * is too much of it or if IEM doesn't implement something.
1263 *
1264 * @returns Strict VBox status code from IEMExecLots.
1265 * @param pVM The cross context VM structure.
1266 * @param pVCpu The cross context CPU structure for the calling EMT.
1267 * @param pfFFDone Force flags done indicator.
1268 *
1269 * @thread EMT(pVCpu)
1270 */
1271static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1272{
1273 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1274 *pfFFDone = false;
1275
1276 /*
1277 * Execute in IEM for a while.
1278 */
1279 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1280 {
1281 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu);
1282 if (rcStrict != VINF_SUCCESS)
1283 {
1284 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1285 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1286 break;
1287
1288 pVCpu->em.s.cIemThenRemInstructions++;
1289 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1290 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1291 return rcStrict;
1292 }
1293 pVCpu->em.s.cIemThenRemInstructions++;
1294
1295 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1296 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1297 {
1298 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1299 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1300 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1301 pVCpu->em.s.enmState = enmNewState;
1302 return VINF_SUCCESS;
1303 }
1304
1305 /*
1306 * Check for pending actions.
1307 */
1308 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1309 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1310 return VINF_SUCCESS;
1311 }
1312
1313 /*
1314 * Switch to REM.
1315 */
1316 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1317 pVCpu->em.s.enmState = EMSTATE_REM;
1318 return VINF_SUCCESS;
1319}
1320
1321
1322/**
1323 * Decides whether to execute RAW, HM, IEM, IEM-then-REM or REM.
1324 *
1325 * @returns The new EM state.
1326 * @param pVM Pointer to the VM.
1327 * @param pVCpu Pointer to the VMCPU.
1328 * @param pCtx Pointer to the guest CPU context.
1329 */
1330EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1331{
1332 /*
1333 * When forcing raw-mode execution, things are simple.
1334 */
1335 if (pVCpu->em.s.fForceRAW)
1336 return EMSTATE_RAW;
1337
1338 /*
1339 * We stay in the wait for SIPI state unless explicitly told otherwise.
1340 */
1341 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1342 return EMSTATE_WAIT_SIPI;
1343
1344 /*
1345 * Execute everything in IEM?
1346 */
1347 if (pVM->em.s.fIemExecutesAll)
1348 return EMSTATE_IEM;
1349
1350 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1351 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1352 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1353
1354 X86EFLAGS EFlags = pCtx->eflags;
1355 if (HMIsEnabled(pVM))
1356 {
1357 /*
1358 * Hardware accelerated raw-mode:
1359 */
1360 if ( EMIsHwVirtExecutionEnabled(pVM)
1361 && HMR3CanExecuteGuest(pVM, pCtx))
1362 return EMSTATE_HM;
1363
1364 /*
1365 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1366 * turns off monitoring features essential for raw mode!
1367 */
1368#ifdef VBOX_WITH_FIRST_IEM_STEP
1369 return EMSTATE_IEM_THEN_REM;
1370#else
1371 return EMSTATE_REM;
1372#endif
1373 }
1374
1375 /*
1376 * Standard raw-mode:
1377 *
1378 * Here we only support 16 & 32-bit protected mode ring-3 code with no I/O privileges,
1379 * or 32-bit protected mode ring-0 code.
1380 *
1381 * The tests are ordered by the likelihood of being true during normal execution.
1382 */
1383 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1384 {
1385 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1386 return EMSTATE_REM;
1387 }
1388
1389# ifndef VBOX_RAW_V86
1390 if (EFlags.u32 & X86_EFL_VM) {
1391 Log2(("raw mode refused: VM_MASK\n"));
1392 return EMSTATE_REM;
1393 }
1394# endif
1395
1396 /** @todo check up on the X86_CR0_AM flag with respect to raw mode!!! We're probably not emulating it right! */
1397 uint32_t u32CR0 = pCtx->cr0;
1398 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1399 {
1400 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1401 return EMSTATE_REM;
1402 }
1403
1404 if (pCtx->cr4 & X86_CR4_PAE)
1405 {
1406 uint32_t u32Dummy, u32Features;
1407
1408 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1409 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1410 return EMSTATE_REM;
1411 }
1412
1413 unsigned uSS = pCtx->ss.Sel;
1414 if ( pCtx->eflags.Bits.u1VM
1415 || (uSS & X86_SEL_RPL) == 3)
1416 {
1417 if (!EMIsRawRing3Enabled(pVM))
1418 return EMSTATE_REM;
1419
1420 if (!(EFlags.u32 & X86_EFL_IF))
1421 {
1422 Log2(("raw mode refused: IF (RawR3)\n"));
1423 return EMSTATE_REM;
1424 }
1425
1426 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1427 {
1428 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1429 return EMSTATE_REM;
1430 }
1431 }
1432 else
1433 {
1434 if (!EMIsRawRing0Enabled(pVM))
1435 return EMSTATE_REM;
1436
1437 if (EMIsRawRing1Enabled(pVM))
1438 {
1439 /* Only ring 0 and 1 supervisor code. */
1440 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1441 {
1442 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1443 return EMSTATE_REM;
1444 }
1445 }
1446 /* Only ring 0 supervisor code. */
1447 else if ((uSS & X86_SEL_RPL) != 0)
1448 {
1449 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1450 return EMSTATE_REM;
1451 }
1452
1453 // Let's start with pure 32-bit ring-0 code first.
1454 /** @todo What's pure 32-bit mode? flat? */
1455 if ( !(pCtx->ss.Attr.n.u1DefBig)
1456 || !(pCtx->cs.Attr.n.u1DefBig))
1457 {
1458 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1459 return EMSTATE_REM;
1460 }
1461
1462 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1463 if (!(u32CR0 & X86_CR0_WP))
1464 {
1465 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1466 return EMSTATE_REM;
1467 }
1468
1469# ifdef VBOX_WITH_RAW_MODE
1470 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1471 {
1472 Log2(("raw r0 mode forced: patch code\n"));
1473# ifdef VBOX_WITH_SAFE_STR
1474 Assert(pCtx->tr.Sel);
1475# endif
1476 return EMSTATE_RAW;
1477 }
1478# endif /* VBOX_WITH_RAW_MODE */
1479
1480# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1481 if (!(EFlags.u32 & X86_EFL_IF))
1482 {
1483 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1484 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1485 return EMSTATE_REM;
1486 }
1487# endif
1488
1489# ifndef VBOX_WITH_RAW_RING1
1490 /** @todo still necessary??? */
1491 if (EFlags.Bits.u2IOPL != 0)
1492 {
1493 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1494 return EMSTATE_REM;
1495 }
1496# endif
1497 }
1498
1499 /*
1500 * Stale hidden selectors mean raw-mode is unsafe (we are being very careful here).
1501 */
1502 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1503 {
1504 Log2(("raw mode refused: stale CS\n"));
1505 return EMSTATE_REM;
1506 }
1507 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1508 {
1509 Log2(("raw mode refused: stale SS\n"));
1510 return EMSTATE_REM;
1511 }
1512 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1513 {
1514 Log2(("raw mode refused: stale DS\n"));
1515 return EMSTATE_REM;
1516 }
1517 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1518 {
1519 Log2(("raw mode refused: stale ES\n"));
1520 return EMSTATE_REM;
1521 }
1522 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1523 {
1524 Log2(("raw mode refused: stale FS\n"));
1525 return EMSTATE_REM;
1526 }
1527 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1528 {
1529 Log2(("raw mode refused: stale GS\n"));
1530 return EMSTATE_REM;
1531 }
1532
1533# ifdef VBOX_WITH_SAFE_STR
1534 if (pCtx->tr.Sel == 0)
1535 {
1536 Log(("Raw mode refused -> TR=0\n"));
1537 return EMSTATE_REM;
1538 }
1539# endif
1540
1541 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1542 return EMSTATE_RAW;
1543}
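
/* Worked example of the decision above (a sketch; the guest states are
   illustrative, not exhaustive):
   - HM enabled and HMR3CanExecuteGuest() says yes           -> EMSTATE_HM
   - HM enabled but the guest state can't be handled by HM   -> EMSTATE_IEM_THEN_REM
                            (or EMSTATE_REM without VBOX_WITH_FIRST_IEM_STEP)
   - HM disabled, flat 32-bit ring-0, CR0.PE/PG/WP=1, IF=1,
     no stale selectors, raw ring-0 enabled                  -> EMSTATE_RAW
   - Same, but SS.RPL=3 with raw ring-3 disabled             -> EMSTATE_REM */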
1544
1545
1546/**
1547 * Executes all high priority post execution force actions.
1548 *
1549 * @returns rc or a fatal status code.
1550 *
1551 * @param pVM Pointer to the VM.
1552 * @param pVCpu Pointer to the VMCPU.
1553 * @param rc The current rc.
1554 */
1555int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1556{
1557 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1558
1559 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1560 PDMCritSectBothFF(pVCpu);
1561
1562 /* Update CR3 (Nested Paging case for HM). */
1563 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1564 {
1565 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1566 if (RT_FAILURE(rc2))
1567 return rc2;
1568 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1569 }
1570
1571 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1572 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1573 {
1574 if (CPUMIsGuestInPAEMode(pVCpu))
1575 {
1576 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1577 AssertPtr(pPdpes);
1578
1579 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1580 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1581 }
1582 else
1583 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1584 }
1585
1586#ifdef VBOX_WITH_RAW_MODE
1587 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1588 CSAMR3DoPendingAction(pVM, pVCpu);
1589#endif
1590
1591 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1592 {
1593 if ( rc > VINF_EM_NO_MEMORY
1594 && rc <= VINF_EM_LAST)
1595 rc = VINF_EM_NO_MEMORY;
1596 }
1597
1598 return rc;
1599}
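
/* Call shape (a sketch mirroring the use in emR3RemExecute above): the
   current status is threaded through and may be overridden by a more
   severe code:
       rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
*/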
1600
1601
1602/**
1603 * Executes all pending forced actions.
1604 *
1605 * Forced actions can cause execution delays and execution
1606 * rescheduling. The first we deal with using action priority, so
1607 * that for instance pending timers aren't scheduled and run until
1608 * right before execution. The rescheduling we deal with using
1609 * return codes. The same goes for VM termination, only in that case
1610 * we exit everything.
1611 *
1612 * @returns VBox status code of equal or greater importance/severity than rc.
1613 * The most important ones are: VINF_EM_RESCHEDULE,
1614 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1615 *
1616 * @param pVM Pointer to the VM.
1617 * @param pVCpu Pointer to the VMCPU.
1618 * @param rc The current rc.
1619 *
1620 */
1621int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1622{
1623 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1624#ifdef VBOX_STRICT
1625 int rcIrq = VINF_SUCCESS;
1626#endif
1627 int rc2;
1628#define UPDATE_RC() \
1629 do { \
1630 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1631 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1632 break; \
1633 if (!rc || rc2 < rc) \
1634 rc = rc2; \
1635 } while (0)
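/* Illustrative UPDATE_RC() merges (a sketch; relies on the VBox convention
   that more important VINF_EM_* codes have numerically lower values):
       rc = VINF_SUCCESS;        rc2 = VINF_EM_RESCHEDULE; UPDATE_RC(); // rc = VINF_EM_RESCHEDULE
       rc = VINF_EM_RESCHEDULE;  rc2 = VINF_EM_OFF;        UPDATE_RC(); // rc = VINF_EM_OFF (more important)
       rc = VERR_INTERNAL_ERROR; rc2 = VINF_EM_RESET;      UPDATE_RC(); // rc unchanged - errors are sticky
       rc = VINF_EM_HALT;        rc2 = VINF_SUCCESS;       UPDATE_RC(); // rc unchanged */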
1636 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1637
1638 /*
1639 * Post execution chunk first.
1640 */
1641 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1642 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1643 {
1644 /*
1645 * EMT Rendezvous (must be serviced before termination).
1646 */
1647 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1648 {
1649 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1650 UPDATE_RC();
1651 /** @todo HACK ALERT! The following test is to make sure EM+TM
1652 * thinks the VM is stopped/reset before the next VM state change
1653 * is made. We need a better solution for this, or at least make it
1654 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1655 * VINF_EM_SUSPEND). */
1656 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1657 {
1658 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1659 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1660 return rc;
1661 }
1662 }
1663
1664 /*
1665 * State change request (cleared by vmR3SetStateLocked).
1666 */
1667 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1668 {
1669 VMSTATE enmState = VMR3GetState(pVM);
1670 switch (enmState)
1671 {
1672 case VMSTATE_FATAL_ERROR:
1673 case VMSTATE_FATAL_ERROR_LS:
1674 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1675 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1676 return VINF_EM_SUSPEND;
1677
1678 case VMSTATE_DESTROYING:
1679 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1680 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1681 return VINF_EM_TERMINATE;
1682
1683 default:
1684 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1685 }
1686 }
1687
1688 /*
1689 * Debugger Facility polling.
1690 */
1691 if (VM_FF_IS_PENDING(pVM, VM_FF_DBGF))
1692 {
1693 rc2 = DBGFR3VMMForcedAction(pVM);
1694 UPDATE_RC();
1695 }
1696
1697 /*
1698 * Postponed reset request.
1699 */
1700 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1701 {
1702 rc2 = VMR3Reset(pVM->pUVM);
1703 UPDATE_RC();
1704 }
1705
1706#ifdef VBOX_WITH_RAW_MODE
1707 /*
1708 * CSAM page scanning.
1709 */
1710 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1711 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1712 {
1713 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1714
1715 /** @todo check for 16 or 32-bit code! (D bit in the code selector) */
1716 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1717
1718 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
1719 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1720 }
1721#endif
1722
1723 /*
1724 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1725 */
1726 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1727 {
1728 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1729 UPDATE_RC();
1730 if (rc == VINF_EM_NO_MEMORY)
1731 return rc;
1732 }
1733
1734 /* check that we got them all */
1735 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1736 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0));
1737 }
1738
1739 /*
1740 * Normal priority then.
1741 * (Executed in no particular order.)
1742 */
1743 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1744 {
1745 /*
1746 * PDM Queues are pending.
1747 */
1748 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1749 PDMR3QueueFlushAll(pVM);
1750
1751 /*
1752 * PDM DMA transfers are pending.
1753 */
1754 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1755 PDMR3DmaRun(pVM);
1756
1757 /*
1758 * EMT Rendezvous (make sure they are handled before the requests).
1759 */
1760 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1761 {
1762 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1763 UPDATE_RC();
1764 /** @todo HACK ALERT! The following test is to make sure EM+TM
1765 * thinks the VM is stopped/reset before the next VM state change
1766 * is made. We need a better solution for this, or at least make it
1767 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1768 * VINF_EM_SUSPEND). */
1769 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1770 {
1771 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1772 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1773 return rc;
1774 }
1775 }
1776
1777 /*
1778 * Requests from other threads.
1779 */
1780 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1781 {
1782 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1783 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1784 {
1785 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1786 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1787 return rc2;
1788 }
1789 UPDATE_RC();
1790 /** @todo HACK ALERT! The following test is to make sure EM+TM
1791 * thinks the VM is stopped/reset before the next VM state change
1792 * is made. We need a better solution for this, or at least make it
1793 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1794 * VINF_EM_SUSPEND). */
1795 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1796 {
1797 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1798 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1799 return rc;
1800 }
1801 }
1802
1803#ifdef VBOX_WITH_REM
1804 /* Replay the handler notification changes. */
1805 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1806 {
1807 /* Try not to cause deadlocks. */
1808 if ( pVM->cCpus == 1
1809 || ( !PGMIsLockOwner(pVM)
1810 && !IOMIsLockWriteOwner(pVM))
1811 )
1812 {
1813 EMRemLock(pVM);
1814 REMR3ReplayHandlerNotifications(pVM);
1815 EMRemUnlock(pVM);
1816 }
1817 }
1818#endif
1819
1820 /* check that we got them all */
1821 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1822 }
1823
1824 /*
1825 * Normal priority then. (per-VCPU)
1826 * (Executed in no particular order.)
1827 */
1828 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1829 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1830 {
1831 /*
1832 * Requests from other threads.
1833 */
1834 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
1835 {
1836 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1837 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1838 {
1839 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1840 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1841 return rc2;
1842 }
1843 UPDATE_RC();
1844 /** @todo HACK ALERT! The following test is to make sure EM+TM
1845 * thinks the VM is stopped/reset before the next VM state change
1846 * is made. We need a better solution for this, or at least make it
1847 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1848 * VINF_EM_SUSPEND). */
1849 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1850 {
1851 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1852 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1853 return rc;
1854 }
1855 }
1856
1857 /*
1858 * Forced unhalting of EMT.
1859 */
1860 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
1861 {
1862 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
1863 if (rc == VINF_EM_HALT)
1864 rc = VINF_EM_RESCHEDULE;
1865 else
1866 {
1867 rc2 = VINF_EM_RESCHEDULE;
1868 UPDATE_RC();
1869 }
1870 }
1871
1872 /* check that we got them all */
1873 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST | VMCPU_FF_UNHALT)));
1874 }
1875
1876 /*
1877 * High priority pre execution chunk last.
1878 * (Executed in ascending priority order.)
1879 */
1880 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1881 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1882 {
1883 /*
1884 * Timers before interrupts.
1885 */
1886 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
1887 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1888 TMR3TimerQueuesDo(pVM);
1889
1890 /*
1891 * The instruction following an emulated STI should *always* be executed!
1892 *
1893 * Note! We intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if
1894 * the eip is the same as the inhibited instr address. Before we
1895 * are able to execute this instruction in raw mode (iret to
1896 * guest code) an external interrupt might force a world switch
1897 * again. Possibly allowing a guest interrupt to be dispatched
1898 * in the process. This could break the guest. Sounds very
1899 * unlikely, but such timing-sensitive problems are not as rare as
1900 * you might think.
1901 */
1902 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1903 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1904 {
1905 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1906 {
1907 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1908 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1909 }
1910 else
1911 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1912 }
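
 /* Guest pattern the above protects (an illustrative sketch):
        sti        ; IF=1, but interrupts stay inhibited for one instruction
        iret/ret   ; this instruction must run before any interrupt injection
    The FF is therefore only cleared once RIP has advanced past the
    instruction following the STI. */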
1913
1914 /*
1915 * Interrupts.
1916 */
1917 bool fWakeupPending = false;
1918 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1919 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1920 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1921 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
1922#ifdef VBOX_WITH_RAW_MODE
1923 && PATMAreInterruptsEnabled(pVM)
1924#else
1925 && (pVCpu->em.s.pCtx->eflags.u32 & X86_EFL_IF)
1926#endif
1927 && !HMR3IsEventPending(pVCpu))
1928 {
1929 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1930 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1931 {
1932 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1933 /** @todo this really isn't nice, should properly handle this */
1934 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1935 if (pVM->em.s.fIemExecutesAll && (rc2 == VINF_EM_RESCHEDULE_REM || rc2 == VINF_EM_RESCHEDULE_HM || rc2 == VINF_EM_RESCHEDULE_RAW))
1936 rc2 = VINF_EM_RESCHEDULE;
1937#ifdef VBOX_STRICT
1938 rcIrq = rc2;
1939#endif
1940 UPDATE_RC();
1941 /* Reschedule required: We must not miss the wakeup below! */
1942 fWakeupPending = true;
1943 }
1944#ifdef VBOX_WITH_REM
1945 /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
1946 else if (REMR3QueryPendingInterrupt(pVM, pVCpu) != REM_NO_PENDING_IRQ)
1947 {
1948 Log2(("REMR3QueryPendingInterrupt -> %#x\n", REMR3QueryPendingInterrupt(pVM, pVCpu)));
1949 rc2 = VINF_EM_RESCHEDULE_REM;
1950 UPDATE_RC();
1951 }
1952#endif
1953 }
1954
1955 /*
1956 * Allocate handy pages.
1957 */
1958 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1959 {
1960 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1961 UPDATE_RC();
1962 }
1963
1964 /*
1965 * Debugger Facility request.
1966 */
1967 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_DBGF, VM_FF_PGM_NO_MEMORY))
1968 {
1969 rc2 = DBGFR3VMMForcedAction(pVM);
1970 UPDATE_RC();
1971 }
1972
1973 /*
1974 * EMT Rendezvous (must be serviced before termination).
1975 */
1976 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1977 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1978 {
1979 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1980 UPDATE_RC();
1981 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1982 * stopped/reset before the next VM state change is made. We need a better
1983 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1984 * && rc <= VINF_EM_SUSPEND). */
1985 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1986 {
1987 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1988 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1989 return rc;
1990 }
1991 }
1992
1993 /*
1994 * State change request (cleared by vmR3SetStateLocked).
1995 */
1996 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1997 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1998 {
1999 VMSTATE enmState = VMR3GetState(pVM);
2000 switch (enmState)
2001 {
2002 case VMSTATE_FATAL_ERROR:
2003 case VMSTATE_FATAL_ERROR_LS:
2004 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2005 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2006 return VINF_EM_SUSPEND;
2007
2008 case VMSTATE_DESTROYING:
2009 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2010 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2011 return VINF_EM_TERMINATE;
2012
2013 default:
2014 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2015 }
2016 }
2017
2018 /*
2019 * Out of memory? Since most of our fellow high priority actions may cause us
2020 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2021 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2022 * than us since we can terminate without allocating more memory.
2023 */
2024 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2025 {
2026 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2027 UPDATE_RC();
2028 if (rc == VINF_EM_NO_MEMORY)
2029 return rc;
2030 }
2031
2032 /*
2033 * If the virtual sync clock is still stopped, make TM restart it.
2034 */
2035 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2036 TMR3VirtualSyncFF(pVM, pVCpu);
2037
2038#ifdef DEBUG
2039 /*
2040 * Debug, pause the VM.
2041 */
2042 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2043 {
2044 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2045 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2046 return VINF_EM_SUSPEND;
2047 }
2048#endif
2049
2050 /* check that we got them all */
2051 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2052 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2053 }
2054
2055#undef UPDATE_RC
2056 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2057 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2058 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2059 return rc;
2060}
2061
2062
2063/**
2064 * Check if the preset execution time cap restricts guest execution scheduling.
2065 *
2066 * @returns true if allowed, false otherwise
2067 * @param pVM Pointer to the VM.
2068 * @param pVCpu Pointer to the VMCPU.
2069 */
2070bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2071{
2072 uint64_t u64UserTime, u64KernelTime;
2073
2074 if ( pVM->uCpuExecutionCap != 100
2075 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2076 {
2077 uint64_t u64TimeNow = RTTimeMilliTS();
2078 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2079 {
2080 /* New time slice. */
2081 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2082 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2083 pVCpu->em.s.u64TimeSliceExec = 0;
2084 }
2085 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2086
2087 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2088 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2089 return false;
2090 }
2091 return true;
2092}
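
/* Worked example for the cap logic above (a sketch, assuming the usual
   100 ms EM_TIME_SLICE from EMInternal.h): with uCpuExecutionCap = 50 the
   per-slice budget is EM_TIME_SLICE * 50 / 100 = 50 ms of kernel+user time.
   Once u64TimeSliceExec reaches 50 ms the function returns false and the
   caller (e.g. emR3RemExecute) sleeps briefly instead of running guest code
   until RTTimeMilliTS() moves into the next slice. */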
2093
2094
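/* Minimal usage sketch for EMR3ExecuteVM() below (the real call site is the
   emulation thread; this only shows the shape, not a new API):
       int rc = EMR3ExecuteVM(pVM, pVCpu);
       // rc is VINF_EM_SUSPEND, VINF_EM_OFF, VINF_EM_TERMINATE or a fatal
       // status; the EMT then waits for further requests or shuts down.
*/
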
2095/**
2096 * Execute VM.
2097 *
2098 * This function is the main loop of the VM. The emulation thread
2099 * calls this function when the VM has been successfully constructed
2100 * and we're ready for executing the VM.
2101 *
2102 * Returning from this function means that the VM is turned off or
2103 * suspended (state already saved) and deconstruction is next in line.
2104 *
2105 * All interaction from other threads is done using forced actions
2106 * and signaling of the wait object.
2107 *
2108 * @returns VBox status code; informational status codes may indicate failure.
2109 * @param pVM Pointer to the VM.
2110 * @param pVCpu Pointer to the VMCPU.
2111 */
2112VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2113{
2114 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2115 pVM,
2116 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2117 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2118 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2119 pVCpu->em.s.fForceRAW));
2120 VM_ASSERT_EMT(pVM);
2121 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2122 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2123 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2124 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2125
2126 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2127 if (rc == 0)
2128 {
2129 /*
2130 * Start the virtual time.
2131 */
2132 TMR3NotifyResume(pVM, pVCpu);
2133
2134 /*
2135 * The Outer Main Loop.
2136 */
2137 bool fFFDone = false;
2138
2139 /* Reschedule right away to start in the right state. */
2140 rc = VINF_SUCCESS;
2141
2142 /* If resuming after a pause or a state load, restore the previous
2143 state (or else we'd start executing code right away); otherwise just reschedule. */
2144 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2145 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2146 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2147 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2148 else
2149 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2150 pVCpu->em.s.cIemThenRemInstructions = 0;
2151 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2152
2153 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2154 for (;;)
2155 {
2156 /*
2157 * Before we can schedule anything (we're here because
2158 * scheduling is required) we must service any pending
2159 * forced actions to avoid any pending action causing
2160 * immediate rescheduling upon entering an inner loop.
2161 *
2162 * Do forced actions.
2163 */
2164 if ( !fFFDone
2165 && RT_SUCCESS(rc)
2166 && rc != VINF_EM_TERMINATE
2167 && rc != VINF_EM_OFF
2168 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2169 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK)))
2170 {
2171 rc = emR3ForcedActions(pVM, pVCpu, rc);
2172 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2173 if ( ( rc == VINF_EM_RESCHEDULE_REM
2174 || rc == VINF_EM_RESCHEDULE_HM)
2175 && pVCpu->em.s.fForceRAW)
2176 rc = VINF_EM_RESCHEDULE_RAW;
2177 }
2178 else if (fFFDone)
2179 fFFDone = false;
2180
2181 /*
2182 * Now what to do?
2183 */
2184 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2185 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2186 switch (rc)
2187 {
2188 /*
2189 * Keep doing what we're currently doing.
2190 */
2191 case VINF_SUCCESS:
2192 break;
2193
2194 /*
2195 * Reschedule - to raw-mode execution.
2196 */
2197 case VINF_EM_RESCHEDULE_RAW:
2198 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2199 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2200 pVCpu->em.s.enmState = EMSTATE_RAW;
2201 break;
2202
2203 /*
2204 * Reschedule - to hardware accelerated raw-mode execution.
2205 */
2206 case VINF_EM_RESCHEDULE_HM:
2207 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2208 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2209 Assert(!pVCpu->em.s.fForceRAW);
2210 pVCpu->em.s.enmState = EMSTATE_HM;
2211 break;
2212
2213 /*
2214 * Reschedule - to recompiled execution.
2215 */
2216 case VINF_EM_RESCHEDULE_REM:
2217#ifdef VBOX_WITH_FIRST_IEM_STEP
2218 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2219 if (HMIsEnabled(pVM))
2220 {
2221 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2222 enmOldState, EMSTATE_IEM_THEN_REM));
2223 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2224 {
2225 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2226 pVCpu->em.s.cIemThenRemInstructions = 0;
2227 }
2228 }
2229 else
2230 {
2231 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2232 pVCpu->em.s.enmState = EMSTATE_REM;
2233 }
2234#else
2235 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2236 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2237 pVCpu->em.s.enmState = EMSTATE_REM;
2238#endif
2239 break;
2240
2241 /*
2242 * Resume.
2243 */
2244 case VINF_EM_RESUME:
2245 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2246 /* Don't reschedule in the halted or wait for SIPI case. */
2247 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2248 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2249 {
2250 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2251 break;
2252 }
2253 /* fall through and get scheduled. */
2254
2255 /*
2256 * Reschedule.
2257 */
2258 case VINF_EM_RESCHEDULE:
2259 {
2260 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2261 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2262 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2263 pVCpu->em.s.cIemThenRemInstructions = 0;
2264 pVCpu->em.s.enmState = enmState;
2265 break;
2266 }
2267
2268 /*
2269 * Halted.
2270 */
2271 case VINF_EM_HALT:
2272 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2273 pVCpu->em.s.enmState = EMSTATE_HALTED;
2274 break;
2275
2276 /*
2277 * Switch to the wait for SIPI state (application processor only)
2278 */
2279 case VINF_EM_WAIT_SIPI:
2280 Assert(pVCpu->idCpu != 0);
2281 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2282 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2283 break;
2284
2285
2286 /*
2287 * Suspend.
2288 */
2289 case VINF_EM_SUSPEND:
2290 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2291 Assert(enmOldState != EMSTATE_SUSPENDED);
2292 pVCpu->em.s.enmPrevState = enmOldState;
2293 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2294 break;
2295
2296 /*
2297 * Reset.
2298 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2299 */
2300 case VINF_EM_RESET:
2301 {
2302 if (pVCpu->idCpu == 0)
2303 {
2304 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2305 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2306 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2307 pVCpu->em.s.cIemThenRemInstructions = 0;
2308 pVCpu->em.s.enmState = enmState;
2309 }
2310 else
2311 {
2312 /* All other VCPUs go into the wait for SIPI state. */
2313 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2314 }
2315 break;
2316 }
2317
2318 /*
2319 * Power Off.
2320 */
2321 case VINF_EM_OFF:
2322 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2323 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2324 TMR3NotifySuspend(pVM, pVCpu);
2325 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2326 return rc;
2327
2328 /*
2329 * Terminate the VM.
2330 */
2331 case VINF_EM_TERMINATE:
2332 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2333 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2334 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2335 TMR3NotifySuspend(pVM, pVCpu);
2336 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2337 return rc;
2338
2339
2340 /*
2341 * Out of memory, suspend the VM and stuff.
2342 */
2343 case VINF_EM_NO_MEMORY:
2344 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2345 Assert(enmOldState != EMSTATE_SUSPENDED);
2346 pVCpu->em.s.enmPrevState = enmOldState;
2347 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2348 TMR3NotifySuspend(pVM, pVCpu);
2349 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2350
2351 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2352 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2353 if (rc != VINF_EM_SUSPEND)
2354 {
2355 if (RT_SUCCESS_NP(rc))
2356 {
2357 AssertLogRelMsgFailed(("%Rrc\n", rc));
2358 rc = VERR_EM_INTERNAL_ERROR;
2359 }
2360 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2361 }
2362 return rc;
2363
2364 /*
2365 * Guest debug events.
2366 */
2367 case VINF_EM_DBG_STEPPED:
2368 case VINF_EM_DBG_STOP:
2369 case VINF_EM_DBG_BREAKPOINT:
2370 case VINF_EM_DBG_STEP:
2371 if (enmOldState == EMSTATE_RAW)
2372 {
2373 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2374 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2375 }
2376 else if (enmOldState == EMSTATE_HM)
2377 {
2378 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2379 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2380 }
2381 else if (enmOldState == EMSTATE_REM)
2382 {
2383 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2384 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2385 }
2386 else
2387 {
2388 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2389 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2390 }
2391 break;
2392
2393 /*
2394 * Hypervisor debug events.
2395 */
2396 case VINF_EM_DBG_HYPER_STEPPED:
2397 case VINF_EM_DBG_HYPER_BREAKPOINT:
2398 case VINF_EM_DBG_HYPER_ASSERTION:
2399 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2400 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2401 break;
2402
2403 /*
2404 * Triple fault.
2405 */
2406 case VINF_EM_TRIPLE_FAULT:
2407 if (!pVM->em.s.fGuruOnTripleFault)
2408 {
2409 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2410 Assert(pVM->cCpus == 1);
2411 REMR3Reset(pVM);
2412 PGMR3ResetCpu(pVM, pVCpu);
2413 TRPMR3ResetCpu(pVCpu);
2414 CPUMR3ResetCpu(pVM, pVCpu);
2415 EMR3ResetCpu(pVCpu);
2416 HMR3ResetCpu(pVCpu);
2417 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2418 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %Rrc: %d -> %d\n", rc, enmOldState, pVCpu->em.s.enmState));
2419 break;
2420 }
2421 /* Else fall through and trigger a guru. */
2422 case VERR_VMM_RING0_ASSERTION:
2423 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2424 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2425 break;
2426
2427 /*
2428 * Any error code showing up here other than the ones we
2429 * know and process above are considered to be FATAL.
2430 *
2431 * Unknown warnings and informational status codes are also
2432 * included in this.
2433 */
2434 default:
2435 if (RT_SUCCESS_NP(rc))
2436 {
2437 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2438 rc = VERR_EM_INTERNAL_ERROR;
2439 }
2440 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2441 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2442 break;
2443 }
2444
2445 /*
2446 * Act on state transition.
2447 */
2448 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2449 if (enmOldState != enmNewState)
2450 {
2451 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2452
2453 /* Clear MWait flags. */
2454 if ( enmOldState == EMSTATE_HALTED
2455 && (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2456 && ( enmNewState == EMSTATE_RAW
2457 || enmNewState == EMSTATE_HM
2458 || enmNewState == EMSTATE_REM
2459 || enmNewState == EMSTATE_IEM_THEN_REM
2460 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2461 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2462 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2463 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2464 {
2465 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2466 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2467 }
2468 }
2469 else
2470 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2471
2472 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2473 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2474
2475 /*
2476 * Act on the new state.
2477 */
2478 switch (enmNewState)
2479 {
2480 /*
2481 * Execute raw.
2482 */
2483 case EMSTATE_RAW:
2484#ifdef VBOX_WITH_RAW_MODE
2485 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2486#else
2487 AssertLogRelMsgFailed(("%Rrc\n", rc));
2488 rc = VERR_EM_INTERNAL_ERROR;
2489#endif
2490 break;
2491
2492 /*
2493 * Execute hardware accelerated raw.
2494 */
2495 case EMSTATE_HM:
2496 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2497 break;
2498
2499 /*
2500 * Execute recompiled.
2501 */
2502 case EMSTATE_REM:
2503 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2504 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2505 break;
2506
2507 /*
2508 * Execute in the interpreter.
2509 */
2510 case EMSTATE_IEM:
2511 {
2512#if 0 /* For testing purposes. */
2513 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2514 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2515 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2516 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2517 rc = VINF_SUCCESS;
2518 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2519#endif
2520 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu));
2521 if (pVM->em.s.fIemExecutesAll)
2522 {
2523 Assert(rc != VINF_EM_RESCHEDULE_REM);
2524 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2525 Assert(rc != VINF_EM_RESCHEDULE_HM);
2526 }
2527 fFFDone = false;
2528 break;
2529 }
2530
2531 /*
2532 * Execute in IEM, hoping we can quickly switch back to HM
2533 * or RAW execution. If our hopes fail, we go to REM.
2534 */
2535 case EMSTATE_IEM_THEN_REM:
2536 {
2537 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2538 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2539 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2540 break;
2541 }
2542
2543 /*
2544 * Application processor execution halted until SIPI.
2545 */
2546 case EMSTATE_WAIT_SIPI:
2547 /* no break */
2548 /*
2549 * hlt - execution halted until interrupt.
2550 */
2551 case EMSTATE_HALTED:
2552 {
2553 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2554 /* If HM (or someone else) stores a pending interrupt in
2555 TRPM, it must be dispatched ASAP without any halting.
2556 Anything pending in TRPM has been accepted and the CPU
2557 should already be in the right state to receive it. */
2558 if (TRPMHasTrap(pVCpu))
2559 rc = VINF_EM_RESCHEDULE;
2560 /* MWAIT has a special extension where it's woken up when
2561 an interrupt is pending even when IF=0. */
2562 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2563 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2564 {
2565 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2566 if ( rc == VINF_SUCCESS
2567 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2568 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2569 {
2570 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2571 rc = VINF_EM_RESCHEDULE;
2572 }
2573 }
2574 else
2575 {
2576 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2577 if ( rc == VINF_SUCCESS
2578 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2579 {
2580 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2581 rc = VINF_EM_RESCHEDULE;
2582 }
2583 }
2584
2585 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2586 break;
2587 }
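
 /* MWAIT wake-up sketch for the halted case above: a guest MWAIT
    executed with ECX bit 0 set (the interrupt-break extension) leaves
    fWait = EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0, so the
    halt waits with fIgnoreInterrupts=false and reschedules on a
    pending APIC/PIC interrupt even though EFLAGS.IF is clear. */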
2588
2589 /*
2590 * Suspended - return to VM.cpp.
2591 */
2592 case EMSTATE_SUSPENDED:
2593 TMR3NotifySuspend(pVM, pVCpu);
2594 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2595 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2596 return VINF_EM_SUSPEND;
2597
2598 /*
2599 * Debugging in the guest.
2600 */
2601 case EMSTATE_DEBUG_GUEST_RAW:
2602 case EMSTATE_DEBUG_GUEST_HM:
2603 case EMSTATE_DEBUG_GUEST_IEM:
2604 case EMSTATE_DEBUG_GUEST_REM:
2605 TMR3NotifySuspend(pVM, pVCpu);
2606 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2607 TMR3NotifyResume(pVM, pVCpu);
2608 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2609 break;
2610
2611 /*
2612 * Debugging in the hypervisor.
2613 */
2614 case EMSTATE_DEBUG_HYPER:
2615 {
2616 TMR3NotifySuspend(pVM, pVCpu);
2617 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2618
2619 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2620 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2621 if (rc != VINF_SUCCESS)
2622 {
2623 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2624 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2625 else
2626 {
2627 /* switch to guru meditation mode */
2628 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2629 VMMR3FatalDump(pVM, pVCpu, rc);
2630 }
2631 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2632 return rc;
2633 }
2634
2635 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2636 TMR3NotifyResume(pVM, pVCpu);
2637 break;
2638 }
2639
2640 /*
2641 * Guru meditation takes place in the debugger.
2642 */
2643 case EMSTATE_GURU_MEDITATION:
2644 {
2645 TMR3NotifySuspend(pVM, pVCpu);
2646 VMMR3FatalDump(pVM, pVCpu, rc);
2647 emR3Debug(pVM, pVCpu, rc);
2648 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2649 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2650 return rc;
2651 }
2652
2653 /*
2654 * The states we don't expect here.
2655 */
2656 case EMSTATE_NONE:
2657 case EMSTATE_TERMINATING:
2658 default:
2659 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2660 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2661 TMR3NotifySuspend(pVM, pVCpu);
2662 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2663 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2664 return VERR_EM_INTERNAL_ERROR;
2665 }
2666 } /* The Outer Main Loop */
2667 }
2668 else
2669 {
2670 /*
2671 * Fatal error.
2672 */
2673 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2674 TMR3NotifySuspend(pVM, pVCpu);
2675 VMMR3FatalDump(pVM, pVCpu, rc);
2676 emR3Debug(pVM, pVCpu, rc);
2677 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2678 /** @todo change the VM state! */
2679 return rc;
2680 }
2681
2682 /* (won't ever get here). */
2683 AssertFailed();
2684}
2685
2686/**
2687 * Notify EM of a state change to suspended (used by FTM).
2688 *
2689 * @param pVM Pointer to the VM.
2690 */
2691VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
2692{
2693 PVMCPU pVCpu = VMMGetCpu(pVM);
2694
2695 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
2696 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
2697 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2698 return VINF_SUCCESS;
2699}
2700
2701/**
2702 * Notify EM of a state change back to the previous state (used by FTM).
2703 *
2704 * @param pVM Pointer to the VM.
2705 */
2706VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
2707{
2708 PVMCPU pVCpu = VMMGetCpu(pVM);
2709 EMSTATE enmCurState = pVCpu->em.s.enmState;
2710
2711 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
2712 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2713 pVCpu->em.s.enmPrevState = enmCurState;
2714 return VINF_SUCCESS;
2715}