VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@58591

Last change on this file since 58591 was 58123, checked in by vboxsync, 9 years ago:

VMM: Made @param pVCpu more uniform and to the point.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 123.1 KB
/* $Id: EM.cpp 58123 2015-10-08 18:09:45Z vboxsync $ */
/** @file
 * EM - Execution Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_em EM - The Execution Monitor / Manager
 *
 * The Execution Monitor/Manager is responsible for running the VM, scheduling
 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
 * Interpreted), and keeping the CPU states in sync. The function
 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
 * emR3RemExecute).
 *
 * The interpreted execution is only used to avoid switching between
 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
 * The interpretation is thus implemented as part of EM.
 *
 * @see grp_em
 */
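
/*
 * Editor's note: the following is a minimal, self-contained sketch (NOT
 * VirtualBox code) of the dispatch pattern described above: an outer
 * 'main loop' that keeps selecting a per-mode inner execution loop until a
 * terminal state is reached.  All names below (SKETCHSTATE, sketchInner*,
 * sketchExecuteVm) are hypothetical stand-ins for EMSTATE, the emR3*Execute
 * inner loops and EMR3ExecuteVM().
 */
#if 0 /* illustrative sketch only */
typedef enum SKETCHSTATE { SKETCH_RAW, SKETCH_HM, SKETCH_REM, SKETCH_TERMINATED } SKETCHSTATE;

static SKETCHSTATE sketchInnerRaw(void) { return SKETCH_HM; }         /* Each inner loop runs until it   */
static SKETCHSTATE sketchInnerHm(void)  { return SKETCH_REM; }        /* decides another mode (or a      */
static SKETCHSTATE sketchInnerRem(void) { return SKETCH_TERMINATED; } /* terminal state) is appropriate. */

static int sketchExecuteVm(void)
{
    SKETCHSTATE enmState = SKETCH_RAW;
    for (;;)
        switch (enmState)
        {
            case SKETCH_RAW: enmState = sketchInnerRaw(); break;
            case SKETCH_HM:  enmState = sketchInnerHm();  break;
            case SKETCH_REM: enmState = sketchInnerRem(); break;
            default:         return 0; /* the real loop returns a VBox status code */
        }
}
#endif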


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#include <VBox/vmm/em.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmqueue.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/patm.h>
#include "EMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include "VMMTracing.h"

#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/stream.h>
#include <iprt/thread.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
#define EM_NOTIFY_HM
#endif


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
static const char *emR3GetStateName(EMSTATE enmState);
#endif
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);


/**
 * Initializes the EM.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
{
    LogFlow(("EMR3Init\n"));
    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, em.s, 32);
    AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
    AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));

    /*
     * Init the structure.
     */
    pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
    PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
    PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");

    bool fEnabled;
    int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileUser = !fEnabled;

    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileSupervisor = !fEnabled;

#ifdef VBOX_WITH_RAW_RING1
    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
    AssertLogRelRCReturn(rc, rc);
#else
    pVM->fRawRing1Enabled = false; /* Disabled by default. */
#endif

    rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
    AssertLogRelRCReturn(rc, rc);

    rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
    AssertLogRelRCReturn(rc, rc);
    pVM->em.s.fGuruOnTripleFault = !fEnabled;
    if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
    {
        LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
        pVM->em.s.fGuruOnTripleFault = true;
    }
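
    /*
     * Editor's note (hedged usage sketch): "TripleFaultReset" above is read
     * from the /EM CFGM node.  Assuming the usual VBoxInternal extradata
     * mapping applies to this key, it could be enabled from the host shell
     * with something like:
     *
     *   VBoxManage setextradata "MyVM" "VBoxInternal/EM/TripleFaultReset" 1
     *
     * Note from the code above that the setting is forced back off (guru
     * meditation restored) on SMP configurations.
     */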

    Log(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
         pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));

#ifdef VBOX_WITH_REM
    /*
     * Initialize the REM critical section.
     */
    AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
    rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
    AssertRCReturn(rc, rc);
#endif

    /*
     * Saved state.
     */
    rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
                               NULL, NULL, NULL,
                               NULL, emR3Save, NULL,
                               NULL, emR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
        pVCpu->em.s.enmPrevState = EMSTATE_NONE;
        pVCpu->em.s.fForceRAW = false;

        pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
#ifdef VBOX_WITH_RAW_MODE
        if (!HMIsEnabled(pVM))
        {
            pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
            AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
        }
#endif

        /* Force reset of the time slice. */
        pVCpu->em.s.u64TimeSliceStart = 0;

# define EM_REG_COUNTER(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_COUNTER_USED(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE_ADV(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

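        /*
         * Editor's note: for illustration, a single invocation such as
         *
         *   EM_REG_COUNTER(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
         *
         * expands, per the #define above, to
         *
         *   rc = STAMR3RegisterF(pVM, &pStats->StatRZAnd, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
         *                        "The number of times AND was successfully interpreted.",
         *                        "/EM/CPU%d/RZ/Interpret/Success/And", i);
         *   AssertRC(rc);
         *
         * i.e. the trailing VCPU index 'i' is the format argument consumed by
         * the %d in the registration name, yielding one sample per vCPU.
         */
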
        /*
         * Statistics.
         */
#ifdef VBOX_WITH_STATISTICS
        PEMSTATS pStats;
        rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->em.s.pStatsR3 = pStats;
        pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
        pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);

        EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
        EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");

        EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
        EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");

        EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
        EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");

        EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
        EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
        EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");

        EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
        pVCpu->em.s.pCliStatTree = 0;

        /* these should be considered for release statistics. */
        EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
        EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHmEntry, "/PROF/CPU%d/EM/HmEnter", "Profiling Hardware Accelerated Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHmExec, "/PROF/CPU%d/EM/HmExec", "Profiling Hardware Accelerated Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");

#endif /* VBOX_WITH_STATISTICS */

        EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
        EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
        EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");

        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
    }

    emR3InitDbg(pVM);
    return VINF_SUCCESS;
}


/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
{
    LogFlow(("EMR3Relocate\n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        if (pVCpu->em.s.pStatsR3)
            pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
    }
}


/**
 * Reset the EM state for a CPU.
 *
 * Called by EMR3Reset and hot plugging.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
{
    pVCpu->em.s.fForceRAW = false;

    /* VMR3Reset may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
       out of the HALTED state here so that enmPrevState doesn't end up as
       HALTED when EMR3Execute returns. */
    if (pVCpu->em.s.enmState == EMSTATE_HALTED)
    {
        Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
        pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
    }
}


/**
 * Reset notification.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
{
    Log(("EMR3Reset: \n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        EMR3ResetCpu(&pVM->aCpus[i]);
}

/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
{
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

#ifdef VBOX_WITH_REM
    PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
#endif
    return VINF_SUCCESS;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
        AssertRCReturn(rc, rc);

        Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
        Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
        AssertRCReturn(rc, rc);

        /* Save mwait state. */
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    /*
     * Validate version.
     */
    if (    uVersion > EM_SAVED_STATE_VERSION
        ||  uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
    {
        AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Load the saved state.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;
        AssertRCReturn(rc, rc);

        if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
        {
            AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
            rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
            AssertRCReturn(rc, rc);
            Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);

            pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
        }
        if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
        {
            /* Load mwait state. */
            rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
            AssertRCReturn(rc, rc);
        }

        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return VINF_SUCCESS;
}


/**
 * Argument packet for emR3SetExecutionPolicy.
 */
struct EMR3SETEXECPOLICYARGS
{
    EMEXECPOLICY    enmPolicy;
    bool            fEnforce;
};


/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
 */
static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /*
     * Only the first CPU changes the variables.
     */
    if (pVCpu->idCpu == 0)
    {
        struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
        switch (pArgs->enmPolicy)
        {
            case EMEXECPOLICY_RECOMPILE_RING0:
                pVM->fRecompileSupervisor = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_RECOMPILE_RING3:
                pVM->fRecompileUser = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_IEM_ALL:
                pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
                break;
            default:
                AssertFailedReturn(VERR_INVALID_PARAMETER);
        }
        Log(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
             pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
    }

    /*
     * Force rescheduling if in RAW, HM, IEM, or REM.
     */
    return    pVCpu->em.s.enmState == EMSTATE_RAW
           || pVCpu->em.s.enmState == EMSTATE_HM
           || pVCpu->em.s.enmState == EMSTATE_IEM
           || pVCpu->em.s.enmState == EMSTATE_REM
           || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
         ? VINF_EM_RESCHEDULE
         : VINF_SUCCESS;
}


/**
 * Changes an execution scheduling policy parameter.
 *
 * This is used to enable or disable raw-mode / hardware-virtualization
 * execution of user and supervisor code.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to change.
 * @param   fEnforce    Whether to enforce the policy or not.
 */
VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);

    struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
    return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
}


/**
 * Queries an execution scheduling policy parameter.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to query.
 * @param   pfEnforced  Where to return the current value.
 */
VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
{
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /* No need to bother EMTs with a query. */
    switch (enmPolicy)
    {
        case EMEXECPOLICY_RECOMPILE_RING0:
            *pfEnforced = pVM->fRecompileSupervisor;
            break;
        case EMEXECPOLICY_RECOMPILE_RING3:
            *pfEnforced = pVM->fRecompileUser;
            break;
        case EMEXECPOLICY_IEM_ALL:
            *pfEnforced = pVM->em.s.fIemExecutesAll;
            break;
        default:
            AssertFailedReturn(VERR_INTERNAL_ERROR_2);
    }

    return VINF_SUCCESS;
}
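
/*
 * Editor's note: a hedged usage sketch (illustrative only, error handling
 * elided).  A ring-3 caller holding a PUVM could force all guest code
 * through IEM and read the setting back like this:
 *
 *   int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *   bool fIemAll = false;
 *   if (RT_SUCCESS(rc))
 *       rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fIemAll);
 *
 * The setter goes through a descending EMT rendezvous (see
 * emR3SetExecutionPolicy above), so every EMT observes the change and
 * reschedules if it is currently in an affected execution state.
 */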


/**
 * Raise a fatal error.
 *
 * Safely terminate the VM with full state report and stuff. This function
 * will naturally never return.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
    AssertReleaseMsgFailed(("longjmp returned!\n"));
}


#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
/**
 * Gets the EM state name.
 *
 * @returns Pointer to the read-only state name.
 * @param   enmState    The state.
 */
static const char *emR3GetStateName(EMSTATE enmState)
{
    switch (enmState)
    {
        case EMSTATE_NONE:              return "EMSTATE_NONE";
        case EMSTATE_RAW:               return "EMSTATE_RAW";
        case EMSTATE_HM:                return "EMSTATE_HM";
        case EMSTATE_IEM:               return "EMSTATE_IEM";
        case EMSTATE_REM:               return "EMSTATE_REM";
        case EMSTATE_HALTED:            return "EMSTATE_HALTED";
        case EMSTATE_WAIT_SIPI:         return "EMSTATE_WAIT_SIPI";
        case EMSTATE_SUSPENDED:         return "EMSTATE_SUSPENDED";
        case EMSTATE_TERMINATING:       return "EMSTATE_TERMINATING";
        case EMSTATE_DEBUG_GUEST_RAW:   return "EMSTATE_DEBUG_GUEST_RAW";
        case EMSTATE_DEBUG_GUEST_HM:    return "EMSTATE_DEBUG_GUEST_HM";
        case EMSTATE_DEBUG_GUEST_IEM:   return "EMSTATE_DEBUG_GUEST_IEM";
        case EMSTATE_DEBUG_GUEST_REM:   return "EMSTATE_DEBUG_GUEST_REM";
        case EMSTATE_DEBUG_HYPER:       return "EMSTATE_DEBUG_HYPER";
        case EMSTATE_GURU_MEDITATION:   return "EMSTATE_GURU_MEDITATION";
        case EMSTATE_IEM_THEN_REM:      return "EMSTATE_IEM_THEN_REM";
        default:                        return "Unknown!";
    }
}
#endif /* LOG_ENABLED || VBOX_STRICT */


/**
 * Debug loop.
 *
 * @returns VBox status code for EM.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      Current EM VBox status code.
 */
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
        const VBOXSTRICTRC rcLast = rc;

        /*
         * Debug related RC.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (   pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    || pVCpu->em.s.fForceRAW /* paranoia */)
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawStep(pVM, pVCpu);
#else
                    AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
#endif
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
                    rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
#ifdef VBOX_WITH_REM
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
                    rc = emR3RemStep(pVM, pVCpu);
#endif
                else
                {
                    rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
                    if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
                        rc = VINF_EM_DBG_STEPPED;
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                RTLogFlush(NULL);
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special errors code! */
            {
                LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
            }
        }

        /*
         * Process the result.
         */
        do
        {
            switch (VBOXSTRICTRC_VAL(rc))
            {
                /*
                 * Continue the debugging loop.
                 */
                case VINF_EM_DBG_STEP:
                case VINF_EM_DBG_STOP:
                case VINF_EM_DBG_STEPPED:
                case VINF_EM_DBG_BREAKPOINT:
                case VINF_EM_DBG_HYPER_STEPPED:
                case VINF_EM_DBG_HYPER_BREAKPOINT:
                case VINF_EM_DBG_HYPER_ASSERTION:
                    break;

                /*
                 * Resuming execution (in some form) has to be done here if we got
                 * a hypervisor debug event.
                 */
                case VINF_SUCCESS:
                case VINF_EM_RESUME:
                case VINF_EM_SUSPEND:
                case VINF_EM_RESCHEDULE:
                case VINF_EM_RESCHEDULE_RAW:
                case VINF_EM_RESCHEDULE_REM:
                case VINF_EM_HALT:
                    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                    {
#ifdef VBOX_WITH_RAW_MODE
                        rc = emR3RawResumeHyper(pVM, pVCpu);
                        if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                            continue;
#else
                        AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
#endif
                    }
                    if (rc == VINF_SUCCESS)
                        rc = VINF_EM_RESCHEDULE;
                    return rc;

                /*
                 * The debugger isn't attached.
                 * We'll simply turn the thing off since that's the easiest thing to do.
                 */
                case VERR_DBGF_NOT_ATTACHED:
                    switch (VBOXSTRICTRC_VAL(rcLast))
                    {
                        case VINF_EM_DBG_HYPER_STEPPED:
                        case VINF_EM_DBG_HYPER_BREAKPOINT:
                        case VINF_EM_DBG_HYPER_ASSERTION:
                        case VERR_TRPM_PANIC:
                        case VERR_TRPM_DONT_PANIC:
                        case VERR_VMM_RING0_ASSERTION:
                        case VERR_VMM_HYPER_CR3_MISMATCH:
                        case VERR_VMM_RING3_CALL_DISABLED:
                            return rcLast;
                    }
                    return VINF_EM_OFF;

                /*
                 * Status codes terminating the VM in one or another sense.
                 */
                case VINF_EM_TERMINATE:
                case VINF_EM_OFF:
                case VINF_EM_RESET:
                case VINF_EM_NO_MEMORY:
                case VINF_EM_RAW_STALE_SELECTOR:
                case VINF_EM_RAW_IRET_TRAP:
                case VERR_TRPM_PANIC:
                case VERR_TRPM_DONT_PANIC:
                case VERR_IEM_INSTR_NOT_IMPLEMENTED:
                case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
                case VERR_VMM_RING0_ASSERTION:
                case VERR_VMM_HYPER_CR3_MISMATCH:
                case VERR_VMM_RING3_CALL_DISABLED:
                case VERR_INTERNAL_ERROR:
                case VERR_INTERNAL_ERROR_2:
                case VERR_INTERNAL_ERROR_3:
                case VERR_INTERNAL_ERROR_4:
                case VERR_INTERNAL_ERROR_5:
                case VERR_IPE_UNEXPECTED_STATUS:
                case VERR_IPE_UNEXPECTED_INFO_STATUS:
                case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                    return rc;

                /*
                 * The rest is unexpected, and will keep us here.
                 */
                default:
                    AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
                    break;
            }
        } while (false);
    } /* debug for ever */
}


/**
 * Steps recompiled code.
 *
 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
 *          VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
    Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

#ifdef VBOX_WITH_REM
    EMRemLock(pVM);

    /*
     * Switch to REM, step instruction, switch back.
     */
    int rc = REMR3State(pVM, pVCpu);
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);
        REMR3StateBack(pVM, pVCpu);
    }
    EMRemUnlock(pVM);

#else
    int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
#endif

    Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
    return rc;
}


/**
 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
 * critical section.
 *
 * @returns false - new fInREMState value.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
{
#ifdef VBOX_WITH_REM
    STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
    REMR3StateBack(pVM, pVCpu);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);

    EMRemUnlock(pVM);
#endif
    return false;
}
1040
1041
1042/**
1043 * Executes recompiled code.
1044 *
1045 * This function contains the recompiler version of the inner
1046 * execution loop (the outer loop being in EMR3ExecuteVM()).
1047 *
1048 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1049 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1050 *
1051 * @param pVM The cross context VM structure.
1052 * @param pVCpu The cross context virtual CPU structure.
1053 * @param pfFFDone Where to store an indicator telling whether or not
1054 * FFs were done before returning.
1055 *
1056 */
1057static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1058{
1059#ifdef LOG_ENABLED
1060 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1061 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1062
1063 if (pCtx->eflags.Bits.u1VM)
1064 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
1065 else
1066 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
1067#endif
1068 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1069
1070#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1071 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1072 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1073 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1074#endif
1075
1076 /*
1077 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1078 * or the REM suggests raw-mode execution.
1079 */
1080 *pfFFDone = false;
1081#ifdef VBOX_WITH_REM
1082 bool fInREMState = false;
1083#endif
1084 int rc = VINF_SUCCESS;
1085 for (;;)
1086 {
1087#ifdef VBOX_WITH_REM
1088 /*
1089 * Lock REM and update the state if not already in sync.
1090 *
1091 * Note! Big lock, but you are not supposed to own any lock when
1092 * coming in here.
1093 */
1094 if (!fInREMState)
1095 {
1096 EMRemLock(pVM);
1097 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1098
1099 /* Flush the recompiler translation blocks if the VCPU has changed;
1100 also force a full CPU state resync. */
1101 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1102 {
1103 REMFlushTBs(pVM);
1104 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1105 }
1106 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1107
1108 rc = REMR3State(pVM, pVCpu);
1109
1110 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1111 if (RT_FAILURE(rc))
1112 break;
1113 fInREMState = true;
1114
1115 /*
1116 * We might have missed the raising of VMREQ, TIMER and some other
1117 * important FFs while we were busy switching the state. So, check again.
1118 */
1119 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1120 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1121 {
1122 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1123 goto l_REMDoForcedActions;
1124 }
1125 }
1126#endif
1127
1128 /*
1129 * Execute REM.
1130 */
1131 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1132 {
1133 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1134#ifdef VBOX_WITH_REM
1135 rc = REMR3Run(pVM, pVCpu);
1136#else
1137 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu));
1138#endif
1139 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1140 }
1141 else
1142 {
1143 /* Give up this time slice; virtual time continues */
1144 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1145 RTThreadSleep(5);
1146 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1147 rc = VINF_SUCCESS;
1148 }
1149
1150 /*
1151 * Deal with high priority post execution FFs before doing anything
1152 * else. Sync back the state and leave the lock to be on the safe side.
1153 */
1154 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1155 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1156 {
1157#ifdef VBOX_WITH_REM
1158 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1159#endif
1160 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1161 }
1162
1163 /*
1164 * Process the returned status code.
1165 */
1166 if (rc != VINF_SUCCESS)
1167 {
1168 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1169 break;
1170 if (rc != VINF_REM_INTERRUPED_FF)
1171 {
1172 /*
1173 * Anything which is not known to us means an internal error
1174 * and the termination of the VM!
1175 */
1176 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1177 break;
1178 }
1179 }
1180
1181
1182 /*
1183 * Check and execute forced actions.
1184 *
1185 * Sync back the VM state and leave the lock before calling any of
1186 * these; you never know what's going to happen here.
1187 */
1188#ifdef VBOX_HIGH_RES_TIMERS_HACK
1189 TMTimerPollVoid(pVM, pVCpu);
1190#endif
1191 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1192 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1193 || VMCPU_FF_IS_PENDING(pVCpu,
1194 VMCPU_FF_ALL_REM_MASK
1195 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1196 {
1197l_REMDoForcedActions:
1198#ifdef VBOX_WITH_REM
1199 if (fInREMState)
1200 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1201#endif
1202 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1203 rc = emR3ForcedActions(pVM, pVCpu, rc);
1204 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1205 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1206 if ( rc != VINF_SUCCESS
1207 && rc != VINF_EM_RESCHEDULE_REM)
1208 {
1209 *pfFFDone = true;
1210 break;
1211 }
1212 }
1213
1214 } /* The Inner Loop, recompiled execution mode version. */
1215
1216
1217#ifdef VBOX_WITH_REM
1218 /*
1219 * Returning. Sync back the VM state if required.
1220 */
1221 if (fInREMState)
1222 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1223#endif
1224
1225 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1226 return rc;
1227}
1228
1229
1230#ifdef DEBUG
1231
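/**
 * Debug helper: single-steps recompiled/interpreted code for a number of
 * instructions, disassembling each one to the log.
 *
 * @returns VINF_EM_RESCHEDULE.
 * @param pVM The cross context VM structure.
 * @param pVCpu The cross context virtual CPU structure.
 * @param cIterations The maximum number of instructions to step.
 */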
1232int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1233{
1234 EMSTATE enmOldState = pVCpu->em.s.enmState;
1235
1236 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1237
1238 Log(("Single step BEGIN:\n"));
1239 for (uint32_t i = 0; i < cIterations; i++)
1240 {
1241 DBGFR3PrgStep(pVCpu);
1242 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1243 emR3RemStep(pVM, pVCpu);
1244 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1245 break;
1246 }
1247 Log(("Single step END:\n"));
1248 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1249 pVCpu->em.s.enmState = enmOldState;
1250 return VINF_EM_RESCHEDULE;
1251}
1252
1253#endif /* DEBUG */
1254
1255
1256/**
1257 * Try to execute the problematic code in IEM first, then fall back on REM if there
1258 * is too much of it or if IEM doesn't implement something.
1259 *
1260 * @returns Strict VBox status code from IEMExecLots.
1261 * @param pVM The cross context VM structure.
1262 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1263 * @param pfFFDone Force flags done indicator.
1264 *
1265 * @thread EMT(pVCpu)
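 *
 * @remarks The loop below gives IEM a budget of 1024 instructions (tracked
 * across calls in cIemThenRemInstructions) before giving up and
 * switching to REM.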
1266 */
1267static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1268{
1269 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1270 *pfFFDone = false;
1271
1272 /*
1273 * Execute in IEM for a while.
1274 */
1275 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1276 {
1277 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu);
1278 if (rcStrict != VINF_SUCCESS)
1279 {
1280 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1281 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1282 break;
1283
1284 pVCpu->em.s.cIemThenRemInstructions++;
1285 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1286 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1287 return rcStrict;
1288 }
1289 pVCpu->em.s.cIemThenRemInstructions++;
1290
1291 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1292 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1293 {
1294 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1295 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1296 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1297 pVCpu->em.s.enmState = enmNewState;
1298 return VINF_SUCCESS;
1299 }
1300
1301 /*
1302 * Check for pending actions.
1303 */
1304 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1305 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1306 return VINF_SUCCESS;
1307 }
1308
1309 /*
1310 * Switch to REM.
1311 */
1312 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1313 pVCpu->em.s.enmState = EMSTATE_REM;
1314 return VINF_SUCCESS;
1315}
1316
1317
1318/**
1319 * Decides whether to execute RAW, HWACC or REM.
1320 *
1321 * @returns new EM state
1322 * @param pVM The cross context VM structure.
1323 * @param pVCpu The cross context virtual CPU structure.
1324 * @param pCtx Pointer to the guest CPU context.
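 *
 * Rough decision order below: forced raw-mode, wait-for-SIPI, the
 * execute-everything-in-IEM setting, HM if it can run the guest (else
 * IEM_THEN_REM), and finally the raw-mode eligibility checks with REM as
 * the fallback for anything raw-mode refuses.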
1325 */
1326EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1327{
1328 /*
1329 * When forcing raw-mode execution, things are simple.
1330 */
1331 if (pVCpu->em.s.fForceRAW)
1332 return EMSTATE_RAW;
1333
1334 /*
1335 * We stay in the wait for SIPI state unless explicitly told otherwise.
1336 */
1337 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1338 return EMSTATE_WAIT_SIPI;
1339
1340 /*
1341 * Execute everything in IEM?
1342 */
1343 if (pVM->em.s.fIemExecutesAll)
1344 return EMSTATE_IEM;
1345
1346 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1347 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1348 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1349
1350 X86EFLAGS EFlags = pCtx->eflags;
1351 if (HMIsEnabled(pVM))
1352 {
1353 /*
1354 * Hardware accelerated raw-mode:
1355 */
1356 if ( EMIsHwVirtExecutionEnabled(pVM)
1357 && HMR3CanExecuteGuest(pVM, pCtx))
1358 return EMSTATE_HM;
1359
1360 /*
1361 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1362 * turns off monitoring features essential for raw mode!
1363 */
1364 return EMSTATE_IEM_THEN_REM;
1365 }
1366
1367 /*
1368 * Standard raw-mode:
1369 *
1370 * Here we only support 16 and 32 bit protected mode ring-3 code that has no I/O privileges,
1371 * or 32-bit protected mode ring-0 code.
1372 *
1373 * The tests are ordered by the likelihood of being true during normal execution.
1374 */
1375 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1376 {
1377 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1378 return EMSTATE_REM;
1379 }
1380
1381# ifndef VBOX_RAW_V86
1382 if (EFlags.u32 & X86_EFL_VM)
{
1383 Log2(("raw mode refused: VM_MASK\n"));
1384 return EMSTATE_REM;
1385 }
1386# endif
1387
1388 /** @todo check up on the X86_CR0_AM flag with respect to raw mode!!! We're probably not emulating it right! */
1389 uint32_t u32CR0 = pCtx->cr0;
1390 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1391 {
1392 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1393 return EMSTATE_REM;
1394 }
1395
1396 if (pCtx->cr4 & X86_CR4_PAE)
1397 {
1398 uint32_t u32Dummy, u32Features;
1399
1400 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1401 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1402 return EMSTATE_REM;
1403 }
1404
1405 unsigned uSS = pCtx->ss.Sel;
1406 if ( pCtx->eflags.Bits.u1VM
1407 || (uSS & X86_SEL_RPL) == 3)
1408 {
1409 if (!EMIsRawRing3Enabled(pVM))
1410 return EMSTATE_REM;
1411
1412 if (!(EFlags.u32 & X86_EFL_IF))
1413 {
1414 Log2(("raw mode refused: IF (RawR3)\n"));
1415 return EMSTATE_REM;
1416 }
1417
1418 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1419 {
1420 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1421 return EMSTATE_REM;
1422 }
1423 }
1424 else
1425 {
1426 if (!EMIsRawRing0Enabled(pVM))
1427 return EMSTATE_REM;
1428
1429 if (EMIsRawRing1Enabled(pVM))
1430 {
1431 /* Only ring 0 and 1 supervisor code. */
1432 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1433 {
1434 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1435 return EMSTATE_REM;
1436 }
1437 }
1438 /* Only ring 0 supervisor code. */
1439 else if ((uSS & X86_SEL_RPL) != 0)
1440 {
1441 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1442 return EMSTATE_REM;
1443 }
1444
1445 // Let's start with pure 32 bits ring 0 code first
1446 /** @todo What's pure 32-bit mode? flat? */
1447 if ( !(pCtx->ss.Attr.n.u1DefBig)
1448 || !(pCtx->cs.Attr.n.u1DefBig))
1449 {
1450 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1451 return EMSTATE_REM;
1452 }
1453
1454 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1455 if (!(u32CR0 & X86_CR0_WP))
1456 {
1457 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1458 return EMSTATE_REM;
1459 }
1460
1461# ifdef VBOX_WITH_RAW_MODE
1462 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1463 {
1464 Log2(("raw r0 mode forced: patch code\n"));
1465# ifdef VBOX_WITH_SAFE_STR
1466 Assert(pCtx->tr.Sel);
1467# endif
1468 return EMSTATE_RAW;
1469 }
1470# endif /* VBOX_WITH_RAW_MODE */
1471
1472# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1473 if (!(EFlags.u32 & X86_EFL_IF))
1474 {
1475 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1476 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1477 return EMSTATE_REM;
1478 }
1479# endif
1480
1481# ifndef VBOX_WITH_RAW_RING1
1482 /** @todo still necessary??? */
1483 if (EFlags.Bits.u2IOPL != 0)
1484 {
1485 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1486 return EMSTATE_REM;
1487 }
1488# endif
1489 }
1490
1491 /*
1492 * Stale hidden selectors mean raw-mode is unsafe (being very careful).
1493 */
1494 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1495 {
1496 Log2(("raw mode refused: stale CS\n"));
1497 return EMSTATE_REM;
1498 }
1499 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1500 {
1501 Log2(("raw mode refused: stale SS\n"));
1502 return EMSTATE_REM;
1503 }
1504 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1505 {
1506 Log2(("raw mode refused: stale DS\n"));
1507 return EMSTATE_REM;
1508 }
1509 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1510 {
1511 Log2(("raw mode refused: stale ES\n"));
1512 return EMSTATE_REM;
1513 }
1514 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1515 {
1516 Log2(("raw mode refused: stale FS\n"));
1517 return EMSTATE_REM;
1518 }
1519 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1520 {
1521 Log2(("raw mode refused: stale GS\n"));
1522 return EMSTATE_REM;
1523 }
1524
1525# ifdef VBOX_WITH_SAFE_STR
1526 if (pCtx->tr.Sel == 0)
1527 {
1528 Log(("Raw mode refused -> TR=0\n"));
1529 return EMSTATE_REM;
1530 }
1531# endif
1532
1533 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1534 return EMSTATE_RAW;
1535}
1536
1537
1538/**
1539 * Executes all high priority post execution force actions.
1540 *
1541 * @returns rc or a fatal status code.
1542 *
1543 * @param pVM The cross context VM structure.
1544 * @param pVCpu The cross context virtual CPU structure.
1545 * @param rc The current rc.
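 *
 * Handled below: pending PDM critical section leaves, HM nested-paging CR3
 * and PAE PDPE updates, pending IEM work, raw-mode CSAM actions, and
 * clamping of informational codes when we are out of memory.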
1546 */
1547int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1548{
1549 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1550
1551 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1552 PDMCritSectBothFF(pVCpu);
1553
1554 /* Update CR3 (Nested Paging case for HM). */
1555 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1556 {
1557 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1558 if (RT_FAILURE(rc2))
1559 return rc2;
1560 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1561 }
1562
1563 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1564 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1565 {
1566 if (CPUMIsGuestInPAEMode(pVCpu))
1567 {
1568 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1569 AssertPtr(pPdpes);
1570
1571 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1572 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1573 }
1574 else
1575 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1576 }
1577
1578 /* IEM has pending work (typically memory write after INS instruction). */
1579 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
1580 rc = VBOXSTRICTRC_TODO(IEMR3DoPendingAction(pVCpu, rc));
1581
1582#ifdef VBOX_WITH_RAW_MODE
1583 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1584 CSAMR3DoPendingAction(pVM, pVCpu);
1585#endif
1586
1587 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1588 {
1589 if ( rc > VINF_EM_NO_MEMORY
1590 && rc <= VINF_EM_LAST)
1591 rc = VINF_EM_NO_MEMORY;
1592 }
1593
1594 return rc;
1595}
1596
1597
1598/**
1599 * Executes all pending forced actions.
1600 *
1601 * Forced actions can cause execution delays and execution
1602 * rescheduling. The former we deal with using action priority, so
1603 * that for instance pending timers aren't scheduled and run until
1604 * right before execution. The rescheduling we deal with using
1605 * return codes. The same goes for VM termination, only in that case
1606 * we exit everything.
1607 *
1608 * @returns VBox status code of equal or greater importance/severity than rc.
1609 * The most important ones are: VINF_EM_RESCHEDULE,
1610 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1611 *
1612 * @param pVM The cross context VM structure.
1613 * @param pVCpu The cross context virtual CPU structure.
1614 * @param rc The current rc.
1615 *
1616 */
1617int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1618{
1619 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1620#ifdef VBOX_STRICT
1621 int rcIrq = VINF_SUCCESS;
1622#endif
1623 int rc2;
1624#define UPDATE_RC() \
1625 do { \
1626 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1627 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1628 break; \
1629 if (!rc || rc2 < rc) \
1630 rc = rc2; \
1631 } while (0)
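/* Illustrative UPDATE_RC walk-through, assuming (per the importance note in
   the function docs) that more important VINF_EM_* requests have lower values:
       rc = VINF_SUCCESS, rc2 = VINF_EM_RESCHEDULE -> rc = VINF_EM_RESCHEDULE
       rc = VINF_EM_RESCHEDULE, rc2 = VINF_EM_SUSPEND -> rc = VINF_EM_SUSPEND
       rc = VINF_EM_SUSPEND, rc2 = VINF_EM_RESCHEDULE -> rc stays VINF_EM_SUSPEND
   A real error (rc < VINF_SUCCESS) is never overwritten by an informational rc2. */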
1632 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1633
1634 /*
1635 * Post execution chunk first.
1636 */
1637 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1638 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1639 {
1640 /*
1641 * EMT Rendezvous (must be serviced before termination).
1642 */
1643 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1644 {
1645 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1646 UPDATE_RC();
1647 /** @todo HACK ALERT! The following test is to make sure EM+TM
1648 * thinks the VM is stopped/reset before the next VM state change
1649 * is made. We need a better solution for this, or at least make it
1650 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1651 * VINF_EM_SUSPEND). */
1652 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1653 {
1654 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1655 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1656 return rc;
1657 }
1658 }
1659
1660 /*
1661 * State change request (cleared by vmR3SetStateLocked).
1662 */
1663 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1664 {
1665 VMSTATE enmState = VMR3GetState(pVM);
1666 switch (enmState)
1667 {
1668 case VMSTATE_FATAL_ERROR:
1669 case VMSTATE_FATAL_ERROR_LS:
1670 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1671 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1672 return VINF_EM_SUSPEND;
1673
1674 case VMSTATE_DESTROYING:
1675 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1676 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1677 return VINF_EM_TERMINATE;
1678
1679 default:
1680 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1681 }
1682 }
1683
1684 /*
1685 * Debugger Facility polling.
1686 */
1687 if (VM_FF_IS_PENDING(pVM, VM_FF_DBGF))
1688 {
1689 rc2 = DBGFR3VMMForcedAction(pVM);
1690 UPDATE_RC();
1691 }
1692
1693 /*
1694 * Postponed reset request.
1695 */
1696 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1697 {
1698 rc2 = VMR3Reset(pVM->pUVM);
1699 UPDATE_RC();
1700 }
1701
1702#ifdef VBOX_WITH_RAW_MODE
1703 /*
1704 * CSAM page scanning.
1705 */
1706 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1707 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1708 {
1709 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1710
1711 /** @todo check for 16 or 32 bit code! (D bit in the code selector) */
1712 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1713
1714 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
1715 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1716 }
1717#endif
1718
1719 /*
1720 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1721 */
1722 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1723 {
1724 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1725 UPDATE_RC();
1726 if (rc == VINF_EM_NO_MEMORY)
1727 return rc;
1728 }
1729
1730 /* check that we got them all */
1731 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1732 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0));
1733 }
1734
1735 /*
1736 * Normal priority then.
1737 * (Executed in no particular order.)
1738 */
1739 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1740 {
1741 /*
1742 * PDM Queues are pending.
1743 */
1744 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1745 PDMR3QueueFlushAll(pVM);
1746
1747 /*
1748 * PDM DMA transfers are pending.
1749 */
1750 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1751 PDMR3DmaRun(pVM);
1752
1753 /*
1754 * EMT Rendezvous (make sure they are handled before the requests).
1755 */
1756 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1757 {
1758 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1759 UPDATE_RC();
1760 /** @todo HACK ALERT! The following test is to make sure EM+TM
1761 * thinks the VM is stopped/reset before the next VM state change
1762 * is made. We need a better solution for this, or at least make it
1763 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1764 * VINF_EM_SUSPEND). */
1765 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1766 {
1767 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1768 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1769 return rc;
1770 }
1771 }
1772
1773 /*
1774 * Requests from other threads.
1775 */
1776 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1777 {
1778 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1779 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1780 {
1781 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1782 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1783 return rc2;
1784 }
1785 UPDATE_RC();
1786 /** @todo HACK ALERT! The following test is to make sure EM+TM
1787 * thinks the VM is stopped/reset before the next VM state change
1788 * is made. We need a better solution for this, or at least make it
1789 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1790 * VINF_EM_SUSPEND). */
1791 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1792 {
1793 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1794 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1795 return rc;
1796 }
1797 }
1798
1799#ifdef VBOX_WITH_REM
1800 /* Replay the handler notification changes. */
1801 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1802 {
1803 /* Try not to cause deadlocks. */
1804 if ( pVM->cCpus == 1
1805 || ( !PGMIsLockOwner(pVM)
1806 && !IOMIsLockWriteOwner(pVM))
1807 )
1808 {
1809 EMRemLock(pVM);
1810 REMR3ReplayHandlerNotifications(pVM);
1811 EMRemUnlock(pVM);
1812 }
1813 }
1814#endif
1815
1816 /* check that we got them all */
1817 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1818 }
1819
1820 /*
1821 * Normal priority then. (per-VCPU)
1822 * (Executed in no particular order.)
1823 */
1824 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1825 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1826 {
1827 /*
1828 * Requests from other threads.
1829 */
1830 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
1831 {
1832 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1833 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1834 {
1835 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1836 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1837 return rc2;
1838 }
1839 UPDATE_RC();
1840 /** @todo HACK ALERT! The following test is to make sure EM+TM
1841 * thinks the VM is stopped/reset before the next VM state change
1842 * is made. We need a better solution for this, or at least make it
1843 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1844 * VINF_EM_SUSPEND). */
1845 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1846 {
1847 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1848 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1849 return rc;
1850 }
1851 }
1852
1853 /*
1854 * Forced unhalting of EMT.
1855 */
1856 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
1857 {
1858 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
1859 if (rc == VINF_EM_HALT)
1860 rc = VINF_EM_RESCHEDULE;
1861 else
1862 {
1863 rc2 = VINF_EM_RESCHEDULE;
1864 UPDATE_RC();
1865 }
1866 }
1867
1868 /* check that we got them all */
1869 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST | VMCPU_FF_UNHALT)));
1870 }
1871
1872 /*
1873 * High priority pre execution chunk last.
1874 * (Executed in ascending priority order.)
1875 */
1876 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1877 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1878 {
1879 /*
1880 * Timers before interrupts.
1881 */
1882 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
1883 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1884 TMR3TimerQueuesDo(pVM);
1885
1886 /*
1887 * The instruction following an emulated STI should *always* be executed!
1888 *
1889 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
1890 * the eip is the same as the inhibited instr address. Before we
1891 * are able to execute this instruction in raw mode (iret to
1892 * guest code) an external interrupt might force a world switch
1893 * again. Possibly allowing a guest interrupt to be dispatched
1894 * in the process. This could break the guest. Sounds very
1895 * unlikely, but such timing-sensitive problems are not as rare as
1896 * you might think.
1897 */
1898 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1899 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1900 {
1901 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1902 {
1903 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1904 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1905 }
1906 else
1907 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1908 }
1909
1910 /*
1911 * Interrupts.
1912 */
1913 bool fWakeupPending = false;
1914 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1915 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1916 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1917 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
1918#ifdef VBOX_WITH_RAW_MODE
1919 && PATMAreInterruptsEnabled(pVM)
1920#else
1921 && (pVCpu->em.s.pCtx->eflags.u32 & X86_EFL_IF)
1922#endif
1923 && !HMR3IsEventPending(pVCpu))
1924 {
1925 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1926 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1927 {
1928 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1929 /** @todo this really isn't nice, should properly handle this */
1930 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1931 if (pVM->em.s.fIemExecutesAll && (rc2 == VINF_EM_RESCHEDULE_REM || rc2 == VINF_EM_RESCHEDULE_HM || rc2 == VINF_EM_RESCHEDULE_RAW))
1932 rc2 = VINF_EM_RESCHEDULE;
1933#ifdef VBOX_STRICT
1934 rcIrq = rc2;
1935#endif
1936 UPDATE_RC();
1937 /* Reschedule required: We must not miss the wakeup below! */
1938 fWakeupPending = true;
1939 }
1940#ifdef VBOX_WITH_REM
1941 /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
1942 else if (REMR3QueryPendingInterrupt(pVM, pVCpu) != REM_NO_PENDING_IRQ)
1943 {
1944 Log2(("REMR3QueryPendingInterrupt -> %#x\n", REMR3QueryPendingInterrupt(pVM, pVCpu)));
1945 rc2 = VINF_EM_RESCHEDULE_REM;
1946 UPDATE_RC();
1947 }
1948#endif
1949 }
1950
1951 /*
1952 * Allocate handy pages.
1953 */
1954 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1955 {
1956 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1957 UPDATE_RC();
1958 }
1959
1960 /*
1961 * Debugger Facility request.
1962 */
1963 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_DBGF, VM_FF_PGM_NO_MEMORY))
1964 {
1965 rc2 = DBGFR3VMMForcedAction(pVM);
1966 UPDATE_RC();
1967 }
1968
1969 /*
1970 * EMT Rendezvous (must be serviced before termination).
1971 */
1972 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1973 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1974 {
1975 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1976 UPDATE_RC();
1977 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1978 * stopped/reset before the next VM state change is made. We need a better
1979 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1980 * && rc >= VINF_EM_SUSPEND). */
1981 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1982 {
1983 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1984 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1985 return rc;
1986 }
1987 }
1988
1989 /*
1990 * State change request (cleared by vmR3SetStateLocked).
1991 */
1992 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1993 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1994 {
1995 VMSTATE enmState = VMR3GetState(pVM);
1996 switch (enmState)
1997 {
1998 case VMSTATE_FATAL_ERROR:
1999 case VMSTATE_FATAL_ERROR_LS:
2000 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2001 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2002 return VINF_EM_SUSPEND;
2003
2004 case VMSTATE_DESTROYING:
2005 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2006 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2007 return VINF_EM_TERMINATE;
2008
2009 default:
2010 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2011 }
2012 }
2013
2014 /*
2015 * Out of memory? Since most of our fellow high priority actions may cause us
2016 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2017 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2018 * than us since we can terminate without allocating more memory.
2019 */
2020 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2021 {
2022 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2023 UPDATE_RC();
2024 if (rc == VINF_EM_NO_MEMORY)
2025 return rc;
2026 }
2027
2028 /*
2029 * If the virtual sync clock is still stopped, make TM restart it.
2030 */
2031 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2032 TMR3VirtualSyncFF(pVM, pVCpu);
2033
2034#ifdef DEBUG
2035 /*
2036 * Debug, pause the VM.
2037 */
2038 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2039 {
2040 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2041 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2042 return VINF_EM_SUSPEND;
2043 }
2044#endif
2045
2046 /* check that we got them all */
2047 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2048 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2049 }
2050
2051#undef UPDATE_RC
2052 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2053 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2054 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2055 return rc;
2056}
2057
2058
2059/**
2060 * Check if the preset execution time cap restricts guest execution scheduling.
2061 *
2062 * @returns true if allowed, false otherwise
2063 * @param pVM The cross context VM structure.
2064 * @param pVCpu The cross context virtual CPU structure.
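 *
 * Worked example (assuming EM_TIME_SLICE is 100 ms): with uCpuExecutionCap=50
 * the EMT may burn at most 100 * 50 / 100 = 50 ms of kernel+user time in the
 * current slice; once u64TimeSliceExec reaches that budget this function
 * returns false until a new slice starts.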
2065 */
2066bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2067{
2068 uint64_t u64UserTime, u64KernelTime;
2069
2070 if ( pVM->uCpuExecutionCap != 100
2071 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2072 {
2073 uint64_t u64TimeNow = RTTimeMilliTS();
2074 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2075 {
2076 /* New time slice. */
2077 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2078 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2079 pVCpu->em.s.u64TimeSliceExec = 0;
2080 }
2081 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2082
2083 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2084 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2085 return false;
2086 }
2087 return true;
2088}
2089
2090
2091/**
2092 * Execute VM.
2093 *
2094 * This function is the main loop of the VM. The emulation thread
2095 * calls this function when the VM has been successfully constructed
2096 * and we're ready to execute the VM.
2097 *
2098 * Returning from this function means that the VM is turned off or
2099 * suspended (state already saved) and deconstruction is next in line.
2100 *
2101 * All interaction from other threads is done using forced actions
2102 * and signaling of the wait object.
2103 *
2104 * @returns VBox status code; informational status codes may indicate failure.
2105 * @param pVM The cross context VM structure.
2106 * @param pVCpu The cross context virtual CPU structure.
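 *
 * Illustrative (simplified, not verbatim) EMT-side usage:
 * @code
 * int rc = EMR3ExecuteVM(pVM, pVCpu);
 * // rc is VINF_EM_SUSPEND, VINF_EM_OFF, VINF_EM_TERMINATE or a fatal
 * // error; the EMT then proceeds with suspend/power-off/destruction.
 * @endcode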
2107 */
2108VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2109{
2110 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2111 pVM,
2112 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2113 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2114 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2115 pVCpu->em.s.fForceRAW));
2116 VM_ASSERT_EMT(pVM);
2117 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2118 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2119 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2120 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2121
2122 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2123 if (rc == 0)
2124 {
2125 /*
2126 * Start the virtual time.
2127 */
2128 TMR3NotifyResume(pVM, pVCpu);
2129
2130 /*
2131 * The Outer Main Loop.
2132 */
2133 bool fFFDone = false;
2134
2135 /* Reschedule right away to start in the right state. */
2136 rc = VINF_SUCCESS;
2137
2138 /* If resuming after a pause or a state load, restore the previous
2139 state so we don't start executing code right away. Else, just reschedule. */
2140 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2141 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2142 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2143 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2144 else
2145 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2146 pVCpu->em.s.cIemThenRemInstructions = 0;
2147 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2148
2149 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2150 for (;;)
2151 {
2152 /*
2153 * Before we can schedule anything (we're here because
2154 * scheduling is required) we must service any pending
2155 * forced actions to avoid any pending action causing
2156 * immediate rescheduling upon entering an inner loop.
2157 *
2158 * Do forced actions.
2159 */
2160 if ( !fFFDone
2161 && RT_SUCCESS(rc)
2162 && rc != VINF_EM_TERMINATE
2163 && rc != VINF_EM_OFF
2164 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2165 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK)))
2166 {
2167 rc = emR3ForcedActions(pVM, pVCpu, rc);
2168 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2169 if ( ( rc == VINF_EM_RESCHEDULE_REM
2170 || rc == VINF_EM_RESCHEDULE_HM)
2171 && pVCpu->em.s.fForceRAW)
2172 rc = VINF_EM_RESCHEDULE_RAW;
2173 }
2174 else if (fFFDone)
2175 fFFDone = false;
2176
2177 /*
2178 * Now what to do?
2179 */
2180 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2181 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2182 switch (rc)
2183 {
2184 /*
2185 * Keep doing what we're currently doing.
2186 */
2187 case VINF_SUCCESS:
2188 break;
2189
2190 /*
2191 * Reschedule - to raw-mode execution.
2192 */
2193 case VINF_EM_RESCHEDULE_RAW:
2194 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2195 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2196 pVCpu->em.s.enmState = EMSTATE_RAW;
2197 break;
2198
2199 /*
2200 * Reschedule - to hardware accelerated raw-mode execution.
2201 */
2202 case VINF_EM_RESCHEDULE_HM:
2203 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2204 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2205 Assert(!pVCpu->em.s.fForceRAW);
2206 pVCpu->em.s.enmState = EMSTATE_HM;
2207 break;
2208
2209 /*
2210 * Reschedule - to recompiled execution.
2211 */
2212 case VINF_EM_RESCHEDULE_REM:
2213 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2214 if (HMIsEnabled(pVM))
2215 {
2216 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2217 enmOldState, EMSTATE_IEM_THEN_REM));
2218 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2219 {
2220 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2221 pVCpu->em.s.cIemThenRemInstructions = 0;
2222 }
2223 }
2224 else
2225 {
2226 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2227 pVCpu->em.s.enmState = EMSTATE_REM;
2228 }
2229 break;
2230
2231 /*
2232 * Resume.
2233 */
2234 case VINF_EM_RESUME:
2235 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2236 /* Don't reschedule in the halted or wait for SIPI case. */
2237 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2238 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2239 {
2240 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2241 break;
2242 }
2243 /* fall through and get scheduled. */
2244
2245 /*
2246 * Reschedule.
2247 */
2248 case VINF_EM_RESCHEDULE:
2249 {
2250 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2251 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2252 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2253 pVCpu->em.s.cIemThenRemInstructions = 0;
2254 pVCpu->em.s.enmState = enmState;
2255 break;
2256 }
2257
2258 /*
2259 * Halted.
2260 */
2261 case VINF_EM_HALT:
2262 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2263 pVCpu->em.s.enmState = EMSTATE_HALTED;
2264 break;
2265
2266 /*
2267 * Switch to the wait for SIPI state (application processor only)
2268 */
2269 case VINF_EM_WAIT_SIPI:
2270 Assert(pVCpu->idCpu != 0);
2271 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2272 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2273 break;
2274
2275
2276 /*
2277 * Suspend.
2278 */
2279 case VINF_EM_SUSPEND:
2280 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2281 Assert(enmOldState != EMSTATE_SUSPENDED);
2282 pVCpu->em.s.enmPrevState = enmOldState;
2283 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2284 break;
2285
2286 /*
2287 * Reset.
2288 * We might end up doing a double reset for now, we'll have to clean up the mess later.
2289 */
2290 case VINF_EM_RESET:
2291 {
2292 if (pVCpu->idCpu == 0)
2293 {
2294 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2295 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2296 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2297 pVCpu->em.s.cIemThenRemInstructions = 0;
2298 pVCpu->em.s.enmState = enmState;
2299 }
2300 else
2301 {
2302 /* All other VCPUs go into the wait for SIPI state. */
2303 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2304 }
2305 break;
2306 }
2307
2308 /*
2309 * Power Off.
2310 */
2311 case VINF_EM_OFF:
2312 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2313 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2314 TMR3NotifySuspend(pVM, pVCpu);
2315 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2316 return rc;
2317
2318 /*
2319 * Terminate the VM.
2320 */
2321 case VINF_EM_TERMINATE:
2322 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2323 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2324 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2325 TMR3NotifySuspend(pVM, pVCpu);
2326 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2327 return rc;
2328
2329
2330 /*
2331 * Out of memory, suspend the VM and stuff.
2332 */
2333 case VINF_EM_NO_MEMORY:
2334 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2335 Assert(enmOldState != EMSTATE_SUSPENDED);
2336 pVCpu->em.s.enmPrevState = enmOldState;
2337 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2338 TMR3NotifySuspend(pVM, pVCpu);
2339 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2340
2341 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2342 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2343 if (rc != VINF_EM_SUSPEND)
2344 {
2345 if (RT_SUCCESS_NP(rc))
2346 {
2347 AssertLogRelMsgFailed(("%Rrc\n", rc));
2348 rc = VERR_EM_INTERNAL_ERROR;
2349 }
2350 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2351 }
2352 return rc;
2353
2354 /*
2355 * Guest debug events.
2356 */
2357 case VINF_EM_DBG_STEPPED:
2358 case VINF_EM_DBG_STOP:
2359 case VINF_EM_DBG_BREAKPOINT:
2360 case VINF_EM_DBG_STEP:
2361 if (enmOldState == EMSTATE_RAW)
2362 {
2363 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2364 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2365 }
2366 else if (enmOldState == EMSTATE_HM)
2367 {
2368 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2369 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2370 }
2371 else if (enmOldState == EMSTATE_REM)
2372 {
2373 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2374 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2375 }
2376 else
2377 {
2378 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2379 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2380 }
2381 break;
2382
2383 /*
2384 * Hypervisor debug events.
2385 */
2386 case VINF_EM_DBG_HYPER_STEPPED:
2387 case VINF_EM_DBG_HYPER_BREAKPOINT:
2388 case VINF_EM_DBG_HYPER_ASSERTION:
2389 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2390 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2391 break;
2392
2393 /*
2394 * Triple fault.
2395 */
2396 case VINF_EM_TRIPLE_FAULT:
2397 if (!pVM->em.s.fGuruOnTripleFault)
2398 {
2399 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2400 Assert(pVM->cCpus == 1);
2401 REMR3Reset(pVM);
2402 PGMR3ResetCpu(pVM, pVCpu);
2403 TRPMR3ResetCpu(pVCpu);
2404 CPUMR3ResetCpu(pVM, pVCpu);
2405 EMR3ResetCpu(pVCpu);
2406 HMR3ResetCpu(pVCpu);
2407 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2408 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d\n", enmOldState, pVCpu->em.s.enmState));
2409 break;
2410 }
2411 /* Else fall through and trigger a guru. */
2412 case VERR_VMM_RING0_ASSERTION:
2413 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2414 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2415 break;
2416
2417 /*
2418 * Any error code showing up here other than the ones we
2419 * know and process above are considered to be FATAL.
2420 *
2421 * Unknown warnings and informational status codes are also
2422 * included in this.
2423 */
2424 default:
2425 if (RT_SUCCESS_NP(rc))
2426 {
2427 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2428 rc = VERR_EM_INTERNAL_ERROR;
2429 }
2430 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2431 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2432 break;
2433 }
2434
2435 /*
2436 * Act on state transition.
2437 */
2438 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2439 if (enmOldState != enmNewState)
2440 {
2441 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2442
2443 /* Clear MWait flags. */
2444 if ( enmOldState == EMSTATE_HALTED
2445 && (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2446 && ( enmNewState == EMSTATE_RAW
2447 || enmNewState == EMSTATE_HM
2448 || enmNewState == EMSTATE_REM
2449 || enmNewState == EMSTATE_IEM_THEN_REM
2450 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2451 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2452 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2453 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2454 {
2455 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2456 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2457 }
2458 }
2459 else
2460 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2461
2462 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2463 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2464
2465 /*
2466 * Act on the new state.
2467 */
2468 switch (enmNewState)
2469 {
2470 /*
2471 * Execute raw.
2472 */
2473 case EMSTATE_RAW:
2474#ifdef VBOX_WITH_RAW_MODE
2475 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2476#else
2477 AssertLogRelMsgFailed(("%Rrc\n", rc));
2478 rc = VERR_EM_INTERNAL_ERROR;
2479#endif
2480 break;
2481
2482 /*
2483 * Execute hardware accelerated raw.
2484 */
2485 case EMSTATE_HM:
2486 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2487 break;
2488
2489 /*
2490 * Execute recompiled.
2491 */
2492 case EMSTATE_REM:
2493 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2494 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2495 break;
2496
2497 /*
2498 * Execute in the interpreter.
2499 */
2500 case EMSTATE_IEM:
2501 {
2502#if 0 /* For testing purposes. */
2503 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2504 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2505 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2506 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2507 rc = VINF_SUCCESS;
2508 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2509#endif
2510 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu));
2511 if (pVM->em.s.fIemExecutesAll)
2512 {
2513 Assert(rc != VINF_EM_RESCHEDULE_REM);
2514 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2515 Assert(rc != VINF_EM_RESCHEDULE_HM);
2516 }
2517 fFFDone = false;
2518 break;
2519 }
2520
2521 /*
2522 * Execute in IEM, hoping we can quickly switch back to HM
2523 * or RAW execution. If our hopes fail, we go to REM.
2524 */
2525 case EMSTATE_IEM_THEN_REM:
2526 {
2527 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2528 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2529 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2530 break;
2531 }
2532
2533 /*
2534 * Application processor execution halted until SIPI.
2535 */
2536 case EMSTATE_WAIT_SIPI:
2537 /* no break */
2538 /*
2539 * hlt - execution halted until interrupt.
2540 */
2541 case EMSTATE_HALTED:
2542 {
2543 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2544 /* If HM (or someone else) stores a pending interrupt in
2545 TRPM, it must be dispatched ASAP without any halting.
2546 Anything pending in TRPM has been accepted and the CPU
2547 should already be in the right state to receive it. */
2548 if (TRPMHasTrap(pVCpu))
2549 rc = VINF_EM_RESCHEDULE;
2550 /* MWAIT has a special extension where it's woken up when
2551 an interrupt is pending even when IF=0. */
2552 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2553 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2554 {
2555 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2556 if ( rc == VINF_SUCCESS
2557 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2558 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2559 {
2560 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2561 rc = VINF_EM_RESCHEDULE;
2562 }
2563 }
2564 else
2565 {
2566 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2567 if ( rc == VINF_SUCCESS
2568 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2569 {
2570 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2571 rc = VINF_EM_RESCHEDULE;
2572 }
2573 }
2574
2575 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2576 break;
2577 }
2578
2579 /*
2580 * Suspended - return to VM.cpp.
2581 */
2582 case EMSTATE_SUSPENDED:
2583 TMR3NotifySuspend(pVM, pVCpu);
2584 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2585 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2586 return VINF_EM_SUSPEND;
2587
2588 /*
2589 * Debugging in the guest.
2590 */
2591 case EMSTATE_DEBUG_GUEST_RAW:
2592 case EMSTATE_DEBUG_GUEST_HM:
2593 case EMSTATE_DEBUG_GUEST_IEM:
2594 case EMSTATE_DEBUG_GUEST_REM:
2595 TMR3NotifySuspend(pVM, pVCpu);
2596 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2597 TMR3NotifyResume(pVM, pVCpu);
2598 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2599 break;
2600
2601 /*
2602 * Debugging in the hypervisor.
2603 */
2604 case EMSTATE_DEBUG_HYPER:
2605 {
2606 TMR3NotifySuspend(pVM, pVCpu);
2607 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2608
2609 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2610 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2611 if (rc != VINF_SUCCESS)
2612 {
2613 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2614 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2615 else
2616 {
2617 /* switch to guru meditation mode */
2618 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2619 VMMR3FatalDump(pVM, pVCpu, rc);
2620 }
2621 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2622 return rc;
2623 }
2624
2625 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2626 TMR3NotifyResume(pVM, pVCpu);
2627 break;
2628 }
2629
2630 /*
2631 * Guru meditation takes place in the debugger.
2632 */
2633 case EMSTATE_GURU_MEDITATION:
2634 {
2635 TMR3NotifySuspend(pVM, pVCpu);
2636 VMMR3FatalDump(pVM, pVCpu, rc);
2637 emR3Debug(pVM, pVCpu, rc);
2638 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2639 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2640 return rc;
2641 }
2642
2643 /*
2644 * The states we don't expect here.
2645 */
2646 case EMSTATE_NONE:
2647 case EMSTATE_TERMINATING:
2648 default:
2649 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2650 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2651 TMR3NotifySuspend(pVM, pVCpu);
2652 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2653 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2654 return VERR_EM_INTERNAL_ERROR;
2655 }
2656 } /* The Outer Main Loop */
2657 }
2658 else
2659 {
2660 /*
2661 * Fatal error.
2662 */
2663 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2664 TMR3NotifySuspend(pVM, pVCpu);
2665 VMMR3FatalDump(pVM, pVCpu, rc);
2666 emR3Debug(pVM, pVCpu, rc);
2667 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2668 /** @todo change the VM state! */
2669 return rc;
2670 }
2671
2672 /* (won't ever get here). */
2673 AssertFailed();
2674}
2675
2676/**
2677 * Notify EM that the VM is being suspended (used by FTM).
2678 *
2679 * @param pVM The cross context VM structure.
2680 */
2681VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
2682{
2683 PVMCPU pVCpu = VMMGetCpu(pVM);
2684
2685 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
2686 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
2687 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2688 return VINF_SUCCESS;
2689}
2690
2691/**
2692 * Notify EM that the VM is being resumed (used by FTM).
2693 *
2694 * @param pVM The cross context VM structure.
2695 */
2696VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
2697{
2698 PVMCPU pVCpu = VMMGetCpu(pVM);
2699 EMSTATE enmCurState = pVCpu->em.s.enmState;
2700
2701 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
2702 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2703 pVCpu->em.s.enmPrevState = enmCurState;
2704 return VINF_SUCCESS;
2705}