VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@48215

Last change on this file since 48215 was 48130, checked in by vboxsync, 11 years ago

VMM: Enable thread-context hooks. Reimplemented event injection logic for VT-x and AMD-V.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 120.3 KB
1/* $Id: EM.cpp 48130 2013-08-28 17:14:38Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
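/*
 * For orientation: the outer loop in EMR3ExecuteVM() conceptually dispatches
 * on the per-VCPU EM state and re-evaluates the schedule each time an inner
 * loop returns. A simplified sketch, not the actual code:
 *
 *     for (;;)
 *     {
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_RAW: rc = emR3RawExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_HM:  rc = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
 *             case EMSTATE_REM: rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             // ... halted, debug and guru-meditation states ...
 *         }
 *         // pick the next execution mode from rc, pending FFs and CPU state
 *     }
 */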
33
34/*******************************************************************************
35* Header Files *
36*******************************************************************************/
37#define LOG_GROUP LOG_GROUP_EM
38#include <VBox/vmm/em.h>
39#include <VBox/vmm/vmm.h>
40#include <VBox/vmm/patm.h>
41#include <VBox/vmm/csam.h>
42#include <VBox/vmm/selm.h>
43#include <VBox/vmm/trpm.h>
44#include <VBox/vmm/iem.h>
45#include <VBox/vmm/iom.h>
46#include <VBox/vmm/dbgf.h>
47#include <VBox/vmm/pgm.h>
48#ifdef VBOX_WITH_REM
49# include <VBox/vmm/rem.h>
50#endif
51#include <VBox/vmm/tm.h>
52#include <VBox/vmm/mm.h>
53#include <VBox/vmm/ssm.h>
54#include <VBox/vmm/pdmapi.h>
55#include <VBox/vmm/pdmcritsect.h>
56#include <VBox/vmm/pdmqueue.h>
57#include <VBox/vmm/hm.h>
58#include <VBox/vmm/patm.h>
59#include "EMInternal.h"
60#include <VBox/vmm/vm.h>
61#include <VBox/vmm/uvm.h>
62#include <VBox/vmm/cpumdis.h>
63#include <VBox/dis.h>
64#include <VBox/disopcode.h>
65#include <VBox/vmm/dbgf.h>
66#include "VMMTracing.h"
67
68#include <iprt/asm.h>
69#include <iprt/string.h>
70#include <iprt/stream.h>
71#include <iprt/thread.h>
72
73
74/*******************************************************************************
75* Defined Constants And Macros *
76*******************************************************************************/
77#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
78#define EM_NOTIFY_HM
79#endif
80
81
82/*******************************************************************************
83* Internal Functions *
84*******************************************************************************/
85static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
86static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
87#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
88static const char *emR3GetStateName(EMSTATE enmState);
89#endif
90static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
91static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
92static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
93int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
94
95
96/**
97 * Initializes the EM.
98 *
99 * @returns VBox status code.
100 * @param pVM Pointer to the VM.
101 */
102VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
103{
104 LogFlow(("EMR3Init\n"));
105 /*
106 * Assert alignment and sizes.
107 */
108 AssertCompileMemberAlignment(VM, em.s, 32);
109 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
110 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
111
112 /*
113 * Init the structure.
114 */
115 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
116 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
117 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
118
119 bool fEnabled;
120 int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
121 AssertLogRelRCReturn(rc, rc);
122 pVM->fRecompileUser = !fEnabled;
123
124 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
125 AssertLogRelRCReturn(rc, rc);
126 pVM->fRecompileSupervisor = !fEnabled;
127
128#ifdef VBOX_WITH_RAW_RING1
129 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
130 AssertLogRelRCReturn(rc, rc);
131#else
132 pVM->fRawRing1Enabled = false; /* Disabled by default. */
133#endif
134
135 rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
136 AssertLogRelRCReturn(rc, rc);
137
138 Log(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool\n",
139 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll));
140
141#ifdef VBOX_WITH_REM
142 /*
143 * Initialize the REM critical section.
144 */
145 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
146 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
147 AssertRCReturn(rc, rc);
148#endif
149
150 /*
151 * Saved state.
152 */
153 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
154 NULL, NULL, NULL,
155 NULL, emR3Save, NULL,
156 NULL, emR3Load, NULL);
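 /* Assumption about the standard SSMR3RegisterInternal callback layout: the
    NULL triples above are the live-migration (prep/exec/vote), save prep/done
    and load prep/done callbacks, which EM does not need; only the save and
    load executors emR3Save/emR3Load are registered. */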
157 if (RT_FAILURE(rc))
158 return rc;
159
160 for (VMCPUID i = 0; i < pVM->cCpus; i++)
161 {
162 PVMCPU pVCpu = &pVM->aCpus[i];
163
164 pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
165 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
166 pVCpu->em.s.fForceRAW = false;
167
168 pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
169#ifdef VBOX_WITH_RAW_MODE
170 if (!HMIsEnabled(pVM))
171 {
172 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
173 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
174 }
175#endif
176
177 /* Force reset of the time slice. */
178 pVCpu->em.s.u64TimeSliceStart = 0;
179
180# define EM_REG_COUNTER(a, b, c) \
181 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
182 AssertRC(rc);
183
184# define EM_REG_COUNTER_USED(a, b, c) \
185 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
186 AssertRC(rc);
187
188# define EM_REG_PROFILE(a, b, c) \
189 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
190 AssertRC(rc);
191
192# define EM_REG_PROFILE_ADV(a, b, c) \
193 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
194 AssertRC(rc);
195
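 /* As an illustration of the macros above, an invocation like
  *     EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "...");
  * expands to a STAMR3RegisterF() call that formats the sample name with the
  * per-CPU loop index 'i' (e.g. "/EM/CPU0/RZ/Interpret/Success/And") and
  * asserts the status code; all the registrations below follow this form. */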
196 /*
197 * Statistics.
198 */
199#ifdef VBOX_WITH_STATISTICS
200 PEMSTATS pStats;
201 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
202 if (RT_FAILURE(rc))
203 return rc;
204
205 pVCpu->em.s.pStatsR3 = pStats;
206 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
207 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
208
209 EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
210 EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");
211
212 EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
213 EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");
214
215 EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
216 EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
217 EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
218 EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
219 EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
220 EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
221 EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
222 EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
223 EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
224 EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
225 EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
226 EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
227 EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
228 EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
229 EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
230 EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
231 EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
232 EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
233 EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
234 EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
235 EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
236 EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
237 EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
238 EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
239 EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
240 EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
241 EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
242 EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
243 EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
244 EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
245 EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
246 EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
247 EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
248 EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
249 EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
250 EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
251 EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
252 EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
253 EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
254 EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
255 EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
256 EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
257 EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
258 EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
259 EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
260 EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
261 EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
262 EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
263 EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
264 EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
265 EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
266 EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
267 EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
268 EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
269 EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
270 EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
271 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
272 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
273 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
274 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
275 EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
276 EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
277 EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
278 EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
279 EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
280 EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
281 EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
282 EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
283 EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
284 EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
285 EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
286 EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
287 EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
288 EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
289
290 EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
291 EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");
292
293 EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
294 EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
295 EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
296 EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
297 EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
298 EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
299 EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
300 EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
301 EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
302 EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
303 EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
304 EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
305 EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
306 EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
307 EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
308 EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
309 EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
310 EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
311 EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
312 EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
313 EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
314 EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
315 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
316 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
317 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
318 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
319 EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
320 EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
321 EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
322 EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
323 EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
324 EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
325 EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
326 EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
327 EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
328 EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
329 EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
330 EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
331 EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
332 EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
333 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
334 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
335 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
336 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
337 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
338 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
339 EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
340 EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
341 EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
342 EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
343 EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
344 EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
345
346 EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
347 EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
348 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
349 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
350 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
351 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
352 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
353 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
354 EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
355 EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
356 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
357 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
358 EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
359 EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
360 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
361 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
362 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
363 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
364 EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
365 EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
366 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
367 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
368 EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
369 EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
370 EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
371 EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
372 EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
373 EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
374
375 EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
376 EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
377 EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of instruction prefixes.");
378 EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of instruction prefixes.");
379
380 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
381# ifdef VBOX_WITH_FIRST_IEM_STEP
382 EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
383# else
384 EM_REG_COUNTER_USED(&pStats->StatIn, "/EM/CPU%d/R3/PrivInst/In", "Number of in instructions.");
385 EM_REG_COUNTER_USED(&pStats->StatOut, "/EM/CPU%d/R3/PrivInst/Out", "Number of out instructions.");
386# endif
387 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
388 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
389 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
390 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
391 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
392 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
393 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
394 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
395 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
396 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
397 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
398 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
399 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
400 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
401 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
402 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
403 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
404 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
405 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
406 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
407 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
408 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
409 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
410 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
411
412 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
413 pVCpu->em.s.pCliStatTree = 0;
414
415 /* these should be considered for release statistics. */
416 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
417 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
418 EM_REG_PROFILE(&pVCpu->em.s.StatHmEntry, "/PROF/CPU%d/EM/HmEnter", "Profiling Hardware Accelerated Mode entry overhead.");
419 EM_REG_PROFILE(&pVCpu->em.s.StatHmExec, "/PROF/CPU%d/EM/HmExec", "Profiling Hardware Accelerated Mode execution.");
420 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
421 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
422 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
423 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
424 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
425 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
426 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
427 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
428
429#endif /* VBOX_WITH_STATISTICS */
430
431 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
432 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
433 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
434 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
435 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
436
437 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
438 }
439
440 emR3InitDbg(pVM);
441 return VINF_SUCCESS;
442}
443
444
445/**
446 * Applies relocations to data and code managed by this
447 * component. This function will be called at init and
448 * whenever the VMM needs to relocate itself inside the GC.
449 *
450 * @param pVM Pointer to the VM.
451 */
452VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
453{
454 LogFlow(("EMR3Relocate\n"));
455 for (VMCPUID i = 0; i < pVM->cCpus; i++)
456 {
457 PVMCPU pVCpu = &pVM->aCpus[i];
458 if (pVCpu->em.s.pStatsR3)
459 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
460 }
461}
462
463
464/**
465 * Reset the EM state for a CPU.
466 *
467 * Called by EMR3Reset and hot plugging.
468 *
469 * @param pVCpu Pointer to the VMCPU.
470 */
471VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
472{
473 pVCpu->em.s.fForceRAW = false;
474
475 /* VMR3Reset may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
476 out of the HALTED state here so that enmPrevState doesn't end up as
477 HALTED when EMR3ExecuteVM returns. */
478 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
479 {
480 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
481 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
482 }
483}
484
485
486/**
487 * Reset notification.
488 *
489 * @param pVM Pointer to the VM.
490 */
491VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
492{
493 Log(("EMR3Reset: \n"));
494 for (VMCPUID i = 0; i < pVM->cCpus; i++)
495 EMR3ResetCpu(&pVM->aCpus[i]);
496}
497
498
499/**
500 * Terminates the EM.
501 *
502 * Termination means cleaning up and freeing all resources;
503 * the VM itself is at this point powered off or suspended.
504 *
505 * @returns VBox status code.
506 * @param pVM Pointer to the VM.
507 */
508VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
509{
510 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
511
512#ifdef VBOX_WITH_REM
513 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
514#endif
515 return VINF_SUCCESS;
516}
517
518
519/**
520 * Execute state save operation.
521 *
522 * @returns VBox status code.
523 * @param pVM Pointer to the VM.
524 * @param pSSM SSM operation handle.
525 */
526static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
527{
528 for (VMCPUID i = 0; i < pVM->cCpus; i++)
529 {
530 PVMCPU pVCpu = &pVM->aCpus[i];
531
532 int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
533 AssertRCReturn(rc, rc);
534
535 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
536 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
537 rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
538 AssertRCReturn(rc, rc);
539
540 /* Save mwait state. */
541 rc = SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
542 AssertRCReturn(rc, rc);
543 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
544 AssertRCReturn(rc, rc);
545 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
546 AssertRCReturn(rc, rc);
547 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
548 AssertRCReturn(rc, rc);
549 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
550 AssertRCReturn(rc, rc);
551 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
552 AssertRCReturn(rc, rc);
553 }
554 return VINF_SUCCESS;
555}
556
557
558/**
559 * Execute state load operation.
560 *
561 * @returns VBox status code.
562 * @param pVM Pointer to the VM.
563 * @param pSSM SSM operation handle.
564 * @param uVersion Data layout version.
565 * @param uPass The data pass.
566 */
567static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
568{
569 /*
570 * Validate version.
571 */
572 if ( uVersion > EM_SAVED_STATE_VERSION
573 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
574 {
575 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
576 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
577 }
578 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
579
580 /*
581 * Load the saved state.
582 */
583 for (VMCPUID i = 0; i < pVM->cCpus; i++)
584 {
585 PVMCPU pVCpu = &pVM->aCpus[i];
586
587 int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
588 if (RT_FAILURE(rc))
589 pVCpu->em.s.fForceRAW = false;
590 AssertRCReturn(rc, rc);
591
592 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
593 {
594 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
595 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
596 AssertRCReturn(rc, rc);
597 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
598
599 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
600 }
601 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
602 {
603 /* Load mwait state. */
604 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
605 AssertRCReturn(rc, rc);
606 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
607 AssertRCReturn(rc, rc);
608 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
609 AssertRCReturn(rc, rc);
610 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
611 AssertRCReturn(rc, rc);
612 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
613 AssertRCReturn(rc, rc);
614 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
615 AssertRCReturn(rc, rc);
616 }
617
618 Assert(!pVCpu->em.s.pCliStatTree);
619 }
620 return VINF_SUCCESS;
621}
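/* Note the symmetry with emR3Save(): every field written there is read back
 * here in the same order, and fields added in later saved-state versions
 * (enmPrevState, the MONITOR/MWAIT block) are guarded by version checks so
 * that older snapshots still load. A hypothetical new field would follow the
 * same pattern (EM_SAVED_STATE_VERSION_NEW and uNewField are illustrative
 * only, not part of this file):
 *     if (uVersion >= EM_SAVED_STATE_VERSION_NEW)
 *     {
 *         rc = SSMR3GetU32(pSSM, &pVCpu->em.s.uNewField);
 *         AssertRCReturn(rc, rc);
 *     }
 */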
622
623
624/**
625 * Argument packet for emR3SetExecutionPolicy.
626 */
627struct EMR3SETEXECPOLICYARGS
628{
629 EMEXECPOLICY enmPolicy;
630 bool fEnforce;
631};
632
633
634/**
635 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
636 */
637static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
638{
639 /*
640 * Only the first CPU changes the variables.
641 */
642 if (pVCpu->idCpu == 0)
643 {
644 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
645 switch (pArgs->enmPolicy)
646 {
647 case EMEXECPOLICY_RECOMPILE_RING0:
648 pVM->fRecompileSupervisor = pArgs->fEnforce;
649 break;
650 case EMEXECPOLICY_RECOMPILE_RING3:
651 pVM->fRecompileUser = pArgs->fEnforce;
652 break;
653 case EMEXECPOLICY_IEM_ALL:
654 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
655 break;
656 default:
657 AssertFailedReturn(VERR_INVALID_PARAMETER);
658 }
659 Log(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
660 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
661 }
662
663 /*
664 * Force rescheduling if in RAW, HM, IEM, REM, or IEM-then-REM.
665 */
666 return pVCpu->em.s.enmState == EMSTATE_RAW
667 || pVCpu->em.s.enmState == EMSTATE_HM
668 || pVCpu->em.s.enmState == EMSTATE_IEM
669 || pVCpu->em.s.enmState == EMSTATE_REM
670 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
671 ? VINF_EM_RESCHEDULE
672 : VINF_SUCCESS;
673}
674
675
676/**
677 * Changes an execution scheduling policy parameter.
678 *
679 * This is used to enable or disable raw-mode / hardware-virtualization
680 * execution of user and supervisor code.
681 *
682 * @returns VINF_SUCCESS on success.
683 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
684 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
685 *
686 * @param pUVM The user mode VM handle.
687 * @param enmPolicy The scheduling policy to change.
688 * @param fEnforce Whether to enforce the policy or not.
689 */
690VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
691{
692 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
693 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
694 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
695
696 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
697 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
698}
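/* A typical call site -- illustrative only, not taken from this file --
 * would force all guest code through IEM like so:
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 * The rendezvous guarantees every EMT observes the new policy and, when
 * necessary, reschedules out of its current execution mode via
 * VINF_EM_RESCHEDULE. */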
699
700
701/**
702 * Queries an execution scheduling policy parameter.
703 *
704 * @returns VBox status code.
705 * @param pUVM The user mode VM handle.
706 * @param enmPolicy The scheduling policy to query.
707 * @param pfEnforced Where to return the current value.
708 */
709VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
710{
711 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
712 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
713 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
714 PVM pVM = pUVM->pVM;
715 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
716
717 /* No need to bother EMTs with a query. */
718 switch (enmPolicy)
719 {
720 case EMEXECPOLICY_RECOMPILE_RING0:
721 *pfEnforced = pVM->fRecompileSupervisor;
722 break;
723 case EMEXECPOLICY_RECOMPILE_RING3:
724 *pfEnforced = pVM->fRecompileUser;
725 break;
726 case EMEXECPOLICY_IEM_ALL:
727 *pfEnforced = pVM->em.s.fIemExecutesAll;
728 break;
729 default:
730 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
731 }
732
733 return VINF_SUCCESS;
734}
735
736
737/**
738 * Raise a fatal error.
739 *
740 * Safely terminates the VM with a full state report and related
741 * diagnostics. This function never returns.
742 *
743 * @param pVCpu Pointer to the VMCPU.
744 * @param rc VBox status code.
745 */
746VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
747{
748 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
749 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
750 AssertReleaseMsgFailed(("longjmp returned!\n"));
751}
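/* The longjmp lands on the matching setjmp on em.s.u.FatalLongJump, which is
 * expected to sit in the EMT's main execution loop (EMR3ExecuteVM, outside
 * this excerpt). Roughly, on the receiving side (an assumption about code not
 * shown here, not verbatim):
 *     int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
 *     if (rc != 0)
 *         // fall into the guru meditation / debug handling with rc
 */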
752
753
754#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
755/**
756 * Gets the EM state name.
757 *
758 * @returns pointer to read only state name,
759 * @param enmState The state.
760 */
761static const char *emR3GetStateName(EMSTATE enmState)
762{
763 switch (enmState)
764 {
765 case EMSTATE_NONE: return "EMSTATE_NONE";
766 case EMSTATE_RAW: return "EMSTATE_RAW";
767 case EMSTATE_HM: return "EMSTATE_HM";
768 case EMSTATE_IEM: return "EMSTATE_IEM";
769 case EMSTATE_REM: return "EMSTATE_REM";
770 case EMSTATE_HALTED: return "EMSTATE_HALTED";
771 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
772 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
773 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
774 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
775 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
776 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
777 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
778 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
779 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
780 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
781 default: return "Unknown!";
782 }
783}
784#endif /* LOG_ENABLED || VBOX_STRICT */
785
786
787/**
788 * Debug loop.
789 *
790 * @returns VBox status code for EM.
791 * @param pVM Pointer to the VM.
792 * @param pVCpu Pointer to the VMCPU.
793 * @param rc Current EM VBox status code.
794 */
795static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
796{
797 for (;;)
798 {
799 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
800 const VBOXSTRICTRC rcLast = rc;
801
802 /*
803 * Debug related RC.
804 */
805 switch (VBOXSTRICTRC_VAL(rc))
806 {
807 /*
808 * Single step an instruction.
809 */
810 case VINF_EM_DBG_STEP:
811 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
812 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
813 || pVCpu->em.s.fForceRAW /* paranoia */)
814#ifdef VBOX_WITH_RAW_MODE
815 rc = emR3RawStep(pVM, pVCpu);
816#else
817 AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
818#endif
819 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
820 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
821#ifdef VBOX_WITH_REM
822 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
823 rc = emR3RemStep(pVM, pVCpu);
824#endif
825 else
826 {
827 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
828 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
829 rc = VINF_EM_DBG_STEPPED;
830 }
831 break;
832
833 /*
834 * Simple events: stepped, breakpoint, stop/assertion.
835 */
836 case VINF_EM_DBG_STEPPED:
837 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
838 break;
839
840 case VINF_EM_DBG_BREAKPOINT:
841 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
842 break;
843
844 case VINF_EM_DBG_STOP:
845 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
846 break;
847
848 case VINF_EM_DBG_HYPER_STEPPED:
849 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
850 break;
851
852 case VINF_EM_DBG_HYPER_BREAKPOINT:
853 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
854 break;
855
856 case VINF_EM_DBG_HYPER_ASSERTION:
857 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
858 RTLogFlush(NULL);
859 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
860 break;
861
862 /*
863 * Guru meditation.
864 */
865 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
866 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
867 break;
868 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
869 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
870 break;
871
872 default: /** @todo don't use default for guru, but make special error codes! */
873 {
874 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
875 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
876 break;
877 }
878 }
879
880 /*
881 * Process the result.
882 */
883 do
884 {
885 switch (VBOXSTRICTRC_VAL(rc))
886 {
887 /*
888 * Continue the debugging loop.
889 */
890 case VINF_EM_DBG_STEP:
891 case VINF_EM_DBG_STOP:
892 case VINF_EM_DBG_STEPPED:
893 case VINF_EM_DBG_BREAKPOINT:
894 case VINF_EM_DBG_HYPER_STEPPED:
895 case VINF_EM_DBG_HYPER_BREAKPOINT:
896 case VINF_EM_DBG_HYPER_ASSERTION:
897 break;
898
899 /*
900 * Resuming execution (in some form) has to be done here if we got
901 * a hypervisor debug event.
902 */
903 case VINF_SUCCESS:
904 case VINF_EM_RESUME:
905 case VINF_EM_SUSPEND:
906 case VINF_EM_RESCHEDULE:
907 case VINF_EM_RESCHEDULE_RAW:
908 case VINF_EM_RESCHEDULE_REM:
909 case VINF_EM_HALT:
910 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
911 {
912#ifdef VBOX_WITH_RAW_MODE
913 rc = emR3RawResumeHyper(pVM, pVCpu);
914 if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
915 continue;
916#else
917 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
918#endif
919 }
920 if (rc == VINF_SUCCESS)
921 rc = VINF_EM_RESCHEDULE;
922 return rc;
923
924 /*
925 * The debugger isn't attached.
926 * We'll simply turn the thing off since that's the easiest thing to do.
927 */
928 case VERR_DBGF_NOT_ATTACHED:
929 switch (VBOXSTRICTRC_VAL(rcLast))
930 {
931 case VINF_EM_DBG_HYPER_STEPPED:
932 case VINF_EM_DBG_HYPER_BREAKPOINT:
933 case VINF_EM_DBG_HYPER_ASSERTION:
934 case VERR_TRPM_PANIC:
935 case VERR_TRPM_DONT_PANIC:
936 case VERR_VMM_RING0_ASSERTION:
937 case VERR_VMM_HYPER_CR3_MISMATCH:
938 case VERR_VMM_RING3_CALL_DISABLED:
939 return rcLast;
940 }
941 return VINF_EM_OFF;
942
943 /*
944 * Status codes terminating the VM in one or another sense.
945 */
946 case VINF_EM_TERMINATE:
947 case VINF_EM_OFF:
948 case VINF_EM_RESET:
949 case VINF_EM_NO_MEMORY:
950 case VINF_EM_RAW_STALE_SELECTOR:
951 case VINF_EM_RAW_IRET_TRAP:
952 case VERR_TRPM_PANIC:
953 case VERR_TRPM_DONT_PANIC:
954 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
955 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
956 case VERR_VMM_RING0_ASSERTION:
957 case VERR_VMM_HYPER_CR3_MISMATCH:
958 case VERR_VMM_RING3_CALL_DISABLED:
959 case VERR_INTERNAL_ERROR:
960 case VERR_INTERNAL_ERROR_2:
961 case VERR_INTERNAL_ERROR_3:
962 case VERR_INTERNAL_ERROR_4:
963 case VERR_INTERNAL_ERROR_5:
964 case VERR_IPE_UNEXPECTED_STATUS:
965 case VERR_IPE_UNEXPECTED_INFO_STATUS:
966 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
967 return rc;
968
969 /*
970 * The rest is unexpected, and will keep us here.
971 */
972 default:
973 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
974 break;
975 }
976 } while (false);
977 } /* debug for ever */
978}
979
980
981/**
982 * Steps recompiled code.
983 *
984 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
985 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
986 *
987 * @param pVM Pointer to the VM.
988 * @param pVCpu Pointer to the VMCPU.
989 */
990static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
991{
992 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
993
994#ifdef VBOX_WITH_REM
995 EMRemLock(pVM);
996
997 /*
998 * Switch to REM, step instruction, switch back.
999 */
1000 int rc = REMR3State(pVM, pVCpu);
1001 if (RT_SUCCESS(rc))
1002 {
1003 rc = REMR3Step(pVM, pVCpu);
1004 REMR3StateBack(pVM, pVCpu);
1005 }
1006 EMRemUnlock(pVM);
1007
1008#else
1009 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1010#endif
1011
1012 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1013 return rc;
1014}
1015
1016
1017/**
1018 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
1019 * critical section.
1020 *
1021 * @returns false - new fInREMState value.
1022 * @param pVM Pointer to the VM.
1023 * @param pVCpu Pointer to the VMCPU.
1024 */
1025DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1026{
1027#ifdef VBOX_WITH_REM
1028 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1029 REMR3StateBack(pVM, pVCpu);
1030 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1031
1032 EMRemUnlock(pVM);
1033#endif
1034 return false;
1035}
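/* Returning the new fInREMState value (always false) lets call sites fold
 * the sync-back and the flag update into a single statement:
 *     fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
 * which is exactly how emR3RemExecute() below uses it. */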
1036
1037
1038/**
1039 * Executes recompiled code.
1040 *
1041 * This function contains the recompiler version of the inner
1042 * execution loop (the outer loop being in EMR3ExecuteVM()).
1043 *
1044 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1045 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1046 *
1047 * @param pVM Pointer to the VM.
1048 * @param pVCpu Pointer to the VMCPU.
1049 * @param pfFFDone Where to store an indicator telling whether or not
1050 * FFs were done before returning.
1051 *
1052 */
1053static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1054{
1055#ifdef LOG_ENABLED
1056 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1057 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1058
1059 if (pCtx->eflags.Bits.u1VM)
1060 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
1061 else
1062 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
1063#endif
1064 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1065
1066#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1067 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1068 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1069 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1070#endif
1071
1072 /*
1073 * Spin until a forced action returns anything but VINF_SUCCESS, or
1074 * the REM suggests raw-mode execution.
1075 */
1076 *pfFFDone = false;
1077#ifdef VBOX_WITH_REM
1078 bool fInREMState = false;
1079#endif
1080 int rc = VINF_SUCCESS;
1081 for (;;)
1082 {
1083#ifdef VBOX_WITH_REM
1084 /*
1085 * Lock REM and update the state if not already in sync.
1086 *
1087 * Note! Big lock, but you are not supposed to own any lock when
1088 * coming in here.
1089 */
1090 if (!fInREMState)
1091 {
1092 EMRemLock(pVM);
1093 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1094
1095 /* Flush the recompiler translation blocks if the VCPU has changed;
1096 also force a full CPU state resync. */
1097 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1098 {
1099 REMFlushTBs(pVM);
1100 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1101 }
1102 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1103
1104 rc = REMR3State(pVM, pVCpu);
1105
1106 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1107 if (RT_FAILURE(rc))
1108 break;
1109 fInREMState = true;
1110
1111 /*
1112 * We might have missed the raising of VMREQ, TIMER and some other
1113 * important FFs while we were busy switching the state. So, check again.
1114 */
1115 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1116 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1117 {
1118 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1119 goto l_REMDoForcedActions;
1120 }
1121 }
1122#endif
1123
1124 /*
1125 * Execute REM.
1126 */
1127 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1128 {
1129 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1130#ifdef VBOX_WITH_REM
1131 rc = REMR3Run(pVM, pVCpu);
1132#else
1133 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu));
1134#endif
1135 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1136 }
1137 else
1138 {
1139 /* Give up this time slice; virtual time continues */
1140 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1141 RTThreadSleep(5);
1142 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1143 rc = VINF_SUCCESS;
1144 }
1145
1146 /*
1147 * Deal with high priority post execution FFs before doing anything
1148 * else. Sync back the state and leave the lock to be on the safe side.
1149 */
1150 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1151 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1152 {
1153#ifdef VBOX_WITH_REM
1154 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1155#endif
1156 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1157 }
1158
1159 /*
1160 * Process the returned status code.
1161 */
1162 if (rc != VINF_SUCCESS)
1163 {
1164 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1165 break;
1166 if (rc != VINF_REM_INTERRUPED_FF)
1167 {
1168 /*
1169 * Anything which is not known to us means an internal error
1170 * and the termination of the VM!
1171 */
1172 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1173 break;
1174 }
1175 }
1176
1177
1178 /*
1179 * Check and execute forced actions.
1180 *
1181 * Sync back the VM state and leave the lock before calling any of
1182 * these; you never know what's going to happen here.
1183 */
1184#ifdef VBOX_HIGH_RES_TIMERS_HACK
1185 TMTimerPollVoid(pVM, pVCpu);
1186#endif
1187 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1188 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1189 || VMCPU_FF_IS_PENDING(pVCpu,
1190 VMCPU_FF_ALL_REM_MASK
1191 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1192 {
1193l_REMDoForcedActions:
1194#ifdef VBOX_WITH_REM
1195 if (fInREMState)
1196 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1197#endif
1198 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1199 rc = emR3ForcedActions(pVM, pVCpu, rc);
1200 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1201 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1202 if ( rc != VINF_SUCCESS
1203 && rc != VINF_EM_RESCHEDULE_REM)
1204 {
1205 *pfFFDone = true;
1206 break;
1207 }
1208 }
1209
1210 } /* The Inner Loop, recompiled execution mode version. */
1211
1212
1213#ifdef VBOX_WITH_REM
1214 /*
1215 * Returning. Sync back the VM state if required.
1216 */
1217 if (fInREMState)
1218 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1219#endif
1220
1221 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1222 return rc;
1223}
1224
1225
1226#ifdef DEBUG
1227
1228int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1229{
1230 EMSTATE enmOldState = pVCpu->em.s.enmState;
1231
1232 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1233
1234 Log(("Single step BEGIN:\n"));
1235 for (uint32_t i = 0; i < cIterations; i++)
1236 {
1237 DBGFR3PrgStep(pVCpu);
1238 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1239 emR3RemStep(pVM, pVCpu);
1240 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1241 break;
1242 }
1243 Log(("Single step END:\n"));
1244 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1245 pVCpu->em.s.enmState = enmOldState;
1246 return VINF_EM_RESCHEDULE;
1247}
1248
1249#endif /* DEBUG */
1250
1251
1252/**
1253 * Try to execute the problematic code in IEM first, then fall back on REM if there
1254 * is too much of it or if IEM doesn't implement something.
1255 *
1256 * @returns Strict VBox status code from IEMExecLots.
1257 * @param pVM The cross context VM structure.
1258 * @param pVCpu The cross context CPU structure for the calling EMT.
1259 * @param pfFFDone Force flags done indicator.
1260 *
1261 * @thread EMT(pVCpu)
1262 */
1263static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1264{
1265 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1266 *pfFFDone = false;
1267
1268 /*
1269 * Execute in IEM for a while.
1270 */
1271 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1272 {
1273 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu);
1274 if (rcStrict != VINF_SUCCESS)
1275 {
1276 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1277 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1278 break;
1279
1280 pVCpu->em.s.cIemThenRemInstructions++;
1281 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1282 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1283 return rcStrict;
1284 }
1285 pVCpu->em.s.cIemThenRemInstructions++;
1286
1287 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1288 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1289 {
1290 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1291 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1292 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1293 pVCpu->em.s.enmState = enmNewState;
1294 return VINF_SUCCESS;
1295 }
1296
1297 /*
1298 * Check for pending actions.
1299 */
1300 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1301 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1302 return VINF_SUCCESS;
1303 }
1304
1305 /*
1306 * Switch to REM.
1307 */
1308 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1309 pVCpu->em.s.enmState = EMSTATE_REM;
1310 return VINF_SUCCESS;
1311}
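/* Note: the 1024-instruction budget above is cumulative across calls; it is
   only reset when EM (re)enters the EMSTATE_IEM_THEN_REM state (see the
   cIemThenRemInstructions = 0 assignments in EMR3ExecuteVM() below). Once
   the budget is exhausted, execution stays in EMSTATE_REM until the next
   reschedule. */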
1312
1313
1314/**
1315 * Decides whether to execute RAW, HM or REM.
1316 *
1317 * @returns new EM state
1318 * @param pVM Pointer to the VM.
1319 * @param pVCpu Pointer to the VMCPU.
1320 * @param pCtx Pointer to the guest CPU context.
1321 */
1322EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1323{
1324 /*
1325 * When forcing raw-mode execution, things are simple.
1326 */
1327 if (pVCpu->em.s.fForceRAW)
1328 return EMSTATE_RAW;
1329
1330 /*
1331 * We stay in the wait for SIPI state unless explicitly told otherwise.
1332 */
1333 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1334 return EMSTATE_WAIT_SIPI;
1335
1336 /*
1337 * Execute everything in IEM?
1338 */
1339 if (pVM->em.s.fIemExecutesAll)
1340 return EMSTATE_IEM;
1341
1342 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1343 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1344 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1345
1346 X86EFLAGS EFlags = pCtx->eflags;
1347 if (HMIsEnabled(pVM))
1348 {
1349 /*
1350 * Hardware accelerated raw-mode:
1351 */
1352 if ( EMIsHwVirtExecutionEnabled(pVM)
1353 && HMR3CanExecuteGuest(pVM, pCtx))
1354 return EMSTATE_HM;
1355
1356 /*
1357 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1358 * turns off monitoring features essential for raw mode!
1359 */
1360#ifdef VBOX_WITH_FIRST_IEM_STEP
1361 return EMSTATE_IEM_THEN_REM;
1362#else
1363 return EMSTATE_REM;
1364#endif
1365 }
1366
1367 /*
1368 * Standard raw-mode:
1369 *
1370 * Here we only support 16 and 32-bit protected mode ring-3 code that has no IO privileges,
1371 * or 32-bit protected mode ring-0 code.
1372 *
1373 * The tests are ordered by the likelihood of being true during normal execution.
1374 */
1375 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1376 {
1377 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1378 return EMSTATE_REM;
1379 }
1380
1381# ifndef VBOX_RAW_V86
1382 if (EFlags.u32 & X86_EFL_VM)
    {
1383 Log2(("raw mode refused: VM_MASK\n"));
1384 return EMSTATE_REM;
1385 }
1386# endif
1387
1388 /** @todo check up the X86_CR0_AM flag with respect to raw mode!!! We're probably not emulating it right! */
1389 uint32_t u32CR0 = pCtx->cr0;
1390 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1391 {
1392 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1393 return EMSTATE_REM;
1394 }
1395
1396 if (pCtx->cr4 & X86_CR4_PAE)
1397 {
1398 uint32_t u32Dummy, u32Features;
1399
1400 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1401 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1402 return EMSTATE_REM;
1403 }
1404
1405 unsigned uSS = pCtx->ss.Sel;
1406 if ( pCtx->eflags.Bits.u1VM
1407 || (uSS & X86_SEL_RPL) == 3)
1408 {
1409 if (!EMIsRawRing3Enabled(pVM))
1410 return EMSTATE_REM;
1411
1412 if (!(EFlags.u32 & X86_EFL_IF))
1413 {
1414 Log2(("raw mode refused: IF (RawR3)\n"));
1415 return EMSTATE_REM;
1416 }
1417
1418 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1419 {
1420 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1421 return EMSTATE_REM;
1422 }
1423 }
1424 else
1425 {
1426 if (!EMIsRawRing0Enabled(pVM))
1427 return EMSTATE_REM;
1428
1429 if (EMIsRawRing1Enabled(pVM))
1430 {
1431 /* Only ring 0 and 1 supervisor code. */
1432 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1433 {
1434 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1435 return EMSTATE_REM;
1436 }
1437 }
1438 /* Only ring 0 supervisor code. */
1439 else if ((uSS & X86_SEL_RPL) != 0)
1440 {
1441 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1442 return EMSTATE_REM;
1443 }
1444
1445 // Let's start with pure 32-bit ring-0 code first
1446 /** @todo What's pure 32-bit mode? flat? */
1447 if ( !(pCtx->ss.Attr.n.u1DefBig)
1448 || !(pCtx->cs.Attr.n.u1DefBig))
1449 {
1450 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1451 return EMSTATE_REM;
1452 }
1453
1454 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1455 if (!(u32CR0 & X86_CR0_WP))
1456 {
1457 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1458 return EMSTATE_REM;
1459 }
1460
1461# ifdef VBOX_WITH_RAW_MODE
1462 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1463 {
1464 Log2(("raw r0 mode forced: patch code\n"));
1465# ifdef VBOX_WITH_SAFE_STR
1466 Assert(pCtx->tr.Sel);
1467# endif
1468 return EMSTATE_RAW;
1469 }
1470# endif /* VBOX_WITH_RAW_MODE */
1471
1472# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1473 if (!(EFlags.u32 & X86_EFL_IF))
1474 {
1475 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1476 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1477 return EMSTATE_REM;
1478 }
1479# endif
1480
1481# ifndef VBOX_WITH_RAW_RING1
1482 /** @todo still necessary??? */
1483 if (EFlags.Bits.u2IOPL != 0)
1484 {
1485 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1486 return EMSTATE_REM;
1487 }
1488# endif
1489 }
1490
1491 /*
1492 * Stale hidden selectors mean raw-mode is unsafe (being very careful).
1493 */
1494 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1495 {
1496 Log2(("raw mode refused: stale CS\n"));
1497 return EMSTATE_REM;
1498 }
1499 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1500 {
1501 Log2(("raw mode refused: stale SS\n"));
1502 return EMSTATE_REM;
1503 }
1504 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1505 {
1506 Log2(("raw mode refused: stale DS\n"));
1507 return EMSTATE_REM;
1508 }
1509 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1510 {
1511 Log2(("raw mode refused: stale ES\n"));
1512 return EMSTATE_REM;
1513 }
1514 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1515 {
1516 Log2(("raw mode refused: stale FS\n"));
1517 return EMSTATE_REM;
1518 }
1519 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1520 {
1521 Log2(("raw mode refused: stale GS\n"));
1522 return EMSTATE_REM;
1523 }
1524
1525# ifdef VBOX_WITH_SAFE_STR
1526 if (pCtx->tr.Sel == 0)
1527 {
1528 Log(("Raw mode refused -> TR=0\n"));
1529 return EMSTATE_REM;
1530 }
1531# endif
1532
1533 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1534 return EMSTATE_RAW;
1535}
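/* Summary of the decision order above: forced raw-mode and WAIT_SIPI first,
   then fIemExecutesAll (everything in IEM), then HM when the guest state is
   executable there (falling back to IEM_THEN_REM/REM otherwise), and finally
   the long list of raw-mode guard checks, any one of which demotes the guest
   to the recompiler. */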
1536
1537
1538/**
1539 * Executes all high priority post execution force actions.
1540 *
1541 * @returns rc or a fatal status code.
1542 *
1543 * @param pVM Pointer to the VM.
1544 * @param pVCpu Pointer to the VMCPU.
1545 * @param rc The current rc.
1546 */
1547int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1548{
1549 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1550
1551 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1552 PDMCritSectBothFF(pVCpu);
1553
1554 /* Update CR3 (Nested Paging case for HM). */
1555 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1556 {
1557 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1558 if (RT_FAILURE(rc2))
1559 return rc2;
1560 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1561 }
1562
1563 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1564 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1565 {
1566 if (CPUMIsGuestInPAEMode(pVCpu))
1567 {
1568 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1569 AssertPtr(pPdpes);
1570
1571 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1572 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1573 }
1574 else
1575 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1576 }
1577
1578#ifdef VBOX_WITH_RAW_MODE
1579 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1580 CSAMR3DoPendingAction(pVM, pVCpu);
1581#endif
1582
1583 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1584 {
1585 if ( rc > VINF_EM_NO_MEMORY
1586 && rc <= VINF_EM_LAST)
1587 rc = VINF_EM_NO_MEMORY;
1588 }
1589
1590 return rc;
1591}
1592
1593
1594/**
1595 * Executes all pending forced actions.
1596 *
1597 * Forced actions can cause execution delays and execution
1598 * rescheduling. The first we deal with using action priority, so
1599 * that for instance pending timers aren't scheduled and run until
1600 * right before execution. The rescheduling we deal with using
1601 * return codes. The same goes for VM termination, only in that case
1602 * we exit everything.
1603 *
1604 * @returns VBox status code of equal or greater importance/severity than rc.
1605 * The most important ones are: VINF_EM_RESCHEDULE,
1606 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1607 *
1608 * @param pVM Pointer to the VM.
1609 * @param pVCpu Pointer to the VMCPU.
1610 * @param rc The current rc.
1611 *
1612 */
1613int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1614{
1615 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1616#ifdef VBOX_STRICT
1617 int rcIrq = VINF_SUCCESS;
1618#endif
1619 int rc2;
1620#define UPDATE_RC() \
1621 do { \
1622 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1623 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1624 break; \
1625 if (!rc || rc2 < rc) \
1626 rc = rc2; \
1627 } while (0)
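/* Illustrative note on UPDATE_RC(), assuming the VINF_EM_* ordering in
   VBox/err.h where lower values mean higher priority (VINF_EM_TERMINATE
   being the lowest): the macro keeps the most important pending status, so
   merging rc = VINF_EM_RESCHEDULE with rc2 = VINF_EM_SUSPEND yields
   VINF_EM_SUSPEND, while failure statuses (rc < VINF_SUCCESS) are never
   overwritten. */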
1628 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1629
1630 /*
1631 * Post execution chunk first.
1632 */
1633 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1634 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1635 {
1636 /*
1637 * EMT Rendezvous (must be serviced before termination).
1638 */
1639 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1640 {
1641 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1642 UPDATE_RC();
1643 /** @todo HACK ALERT! The following test is to make sure EM+TM
1644 * thinks the VM is stopped/reset before the next VM state change
1645 * is made. We need a better solution for this, or at least make it
1646 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1647 * VINF_EM_SUSPEND). */
1648 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1649 {
1650 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1651 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1652 return rc;
1653 }
1654 }
1655
1656 /*
1657 * State change request (cleared by vmR3SetStateLocked).
1658 */
1659 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1660 {
1661 VMSTATE enmState = VMR3GetState(pVM);
1662 switch (enmState)
1663 {
1664 case VMSTATE_FATAL_ERROR:
1665 case VMSTATE_FATAL_ERROR_LS:
1666 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1667 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1668 return VINF_EM_SUSPEND;
1669
1670 case VMSTATE_DESTROYING:
1671 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1672 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1673 return VINF_EM_TERMINATE;
1674
1675 default:
1676 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1677 }
1678 }
1679
1680 /*
1681 * Debugger Facility polling.
1682 */
1683 if (VM_FF_IS_PENDING(pVM, VM_FF_DBGF))
1684 {
1685 rc2 = DBGFR3VMMForcedAction(pVM);
1686 UPDATE_RC();
1687 }
1688
1689 /*
1690 * Postponed reset request.
1691 */
1692 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1693 {
1694 rc2 = VMR3Reset(pVM->pUVM);
1695 UPDATE_RC();
1696 }
1697
1698#ifdef VBOX_WITH_RAW_MODE
1699 /*
1700 * CSAM page scanning.
1701 */
1702 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1703 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1704 {
1705 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1706
1707 /** @todo check for 16 or 32-bit code! (D bit in the code selector) */
1708 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1709
1710 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
1711 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1712 }
1713#endif
1714
1715 /*
1716 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1717 */
1718 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1719 {
1720 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1721 UPDATE_RC();
1722 if (rc == VINF_EM_NO_MEMORY)
1723 return rc;
1724 }
1725
1726 /* check that we got them all */
1727 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1728 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0));
1729 }
1730
1731 /*
1732 * Normal priority then.
1733 * (Executed in no particular order.)
1734 */
1735 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1736 {
1737 /*
1738 * PDM Queues are pending.
1739 */
1740 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1741 PDMR3QueueFlushAll(pVM);
1742
1743 /*
1744 * PDM DMA transfers are pending.
1745 */
1746 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1747 PDMR3DmaRun(pVM);
1748
1749 /*
1750 * EMT Rendezvous (make sure they are handled before the requests).
1751 */
1752 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1753 {
1754 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1755 UPDATE_RC();
1756 /** @todo HACK ALERT! The following test is to make sure EM+TM
1757 * thinks the VM is stopped/reset before the next VM state change
1758 * is made. We need a better solution for this, or at least make it
1759 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1760 * VINF_EM_SUSPEND). */
1761 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1762 {
1763 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1764 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1765 return rc;
1766 }
1767 }
1768
1769 /*
1770 * Requests from other threads.
1771 */
1772 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1773 {
1774 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1775 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1776 {
1777 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1778 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1779 return rc2;
1780 }
1781 UPDATE_RC();
1782 /** @todo HACK ALERT! The following test is to make sure EM+TM
1783 * thinks the VM is stopped/reset before the next VM state change
1784 * is made. We need a better solution for this, or at least make it
1785 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1786 * VINF_EM_SUSPEND). */
1787 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1788 {
1789 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1790 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1791 return rc;
1792 }
1793 }
1794
1795#ifdef VBOX_WITH_REM
1796 /* Replay the handler notification changes. */
1797 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1798 {
1799 /* Try not to cause deadlocks. */
1800 if ( pVM->cCpus == 1
1801 || ( !PGMIsLockOwner(pVM)
1802 && !IOMIsLockWriteOwner(pVM))
1803 )
1804 {
1805 EMRemLock(pVM);
1806 REMR3ReplayHandlerNotifications(pVM);
1807 EMRemUnlock(pVM);
1808 }
1809 }
1810#endif
1811
1812 /* check that we got them all */
1813 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1814 }
1815
1816 /*
1817 * Normal priority then. (per-VCPU)
1818 * (Executed in no particular order.)
1819 */
1820 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1821 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1822 {
1823 /*
1824 * Requests from other threads.
1825 */
1826 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
1827 {
1828 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1829 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1830 {
1831 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1832 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1833 return rc2;
1834 }
1835 UPDATE_RC();
1836 /** @todo HACK ALERT! The following test is to make sure EM+TM
1837 * thinks the VM is stopped/reset before the next VM state change
1838 * is made. We need a better solution for this, or at least make it
1839 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1840 * VINF_EM_SUSPEND). */
1841 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1842 {
1843 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1844 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1845 return rc;
1846 }
1847 }
1848
1849 /* check that we got them all */
1850 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST)));
1851 }
1852
1853 /*
1854 * High priority pre execution chunk last.
1855 * (Executed in ascending priority order.)
1856 */
1857 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1858 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1859 {
1860 /*
1861 * Timers before interrupts.
1862 */
1863 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
1864 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1865 TMR3TimerQueuesDo(pVM);
1866
1867 /*
1868 * The instruction following an emulated STI should *always* be executed!
1869 *
1870 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
1871 * the eip is the same as the inhibited instr address. Before we
1872 * are able to execute this instruction in raw mode (iret to
1873 * guest code) an external interrupt might force a world switch
1874 * again, possibly allowing a guest interrupt to be dispatched
1875 * in the process. This could break the guest. Sounds very
1876 * unlikely, but such timing-sensitive problems are not as rare as
1877 * you might think.
1878 */
1879 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1880 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1881 {
1882 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1883 {
1884 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1885 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1886 }
1887 else
1888 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1889 }
1890
1891 /*
1892 * Interrupts.
1893 */
1894 bool fWakeupPending = false;
1895 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1896 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1897 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1898 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
1899#ifdef VBOX_WITH_RAW_MODE
1900 && PATMAreInterruptsEnabled(pVM)
1901#else
1902 && (pVCpu->em.s.pCtx->eflags.u32 & X86_EFL_IF)
1903#endif
1904 && !HMR3IsEventPending(pVCpu))
1905 {
1906 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1907 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1908 {
1909 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1910 /** @todo this really isn't nice, should properly handle this */
1911 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1912 if (pVM->em.s.fIemExecutesAll && (rc2 == VINF_EM_RESCHEDULE_REM || rc2 == VINF_EM_RESCHEDULE_HM || rc2 == VINF_EM_RESCHEDULE_RAW))
1913 rc2 = VINF_EM_RESCHEDULE;
1914#ifdef VBOX_STRICT
1915 rcIrq = rc2;
1916#endif
1917 UPDATE_RC();
1918 /* Reschedule required: We must not miss the wakeup below! */
1919 fWakeupPending = true;
1920 }
1921#ifdef VBOX_WITH_REM
1922 /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
1923 else if (REMR3QueryPendingInterrupt(pVM, pVCpu) != REM_NO_PENDING_IRQ)
1924 {
1925 Log2(("REMR3QueryPendingInterrupt -> %#x\n", REMR3QueryPendingInterrupt(pVM, pVCpu)));
1926 rc2 = VINF_EM_RESCHEDULE_REM;
1927 UPDATE_RC();
1928 }
1929#endif
1930 }
1931
1932 /*
1933 * Allocate handy pages.
1934 */
1935 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1936 {
1937 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1938 UPDATE_RC();
1939 }
1940
1941 /*
1942 * Debugger Facility request.
1943 */
1944 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_DBGF, VM_FF_PGM_NO_MEMORY))
1945 {
1946 rc2 = DBGFR3VMMForcedAction(pVM);
1947 UPDATE_RC();
1948 }
1949
1950 /*
1951 * EMT Rendezvous (must be serviced before termination).
1952 */
1953 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1954 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1955 {
1956 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1957 UPDATE_RC();
1958 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1959 * stopped/reset before the next VM state change is made. We need a better
1960 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1961 * && rc <= VINF_EM_SUSPEND). */
1962 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1963 {
1964 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1965 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1966 return rc;
1967 }
1968 }
1969
1970 /*
1971 * State change request (cleared by vmR3SetStateLocked).
1972 */
1973 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1974 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1975 {
1976 VMSTATE enmState = VMR3GetState(pVM);
1977 switch (enmState)
1978 {
1979 case VMSTATE_FATAL_ERROR:
1980 case VMSTATE_FATAL_ERROR_LS:
1981 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1982 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1983 return VINF_EM_SUSPEND;
1984
1985 case VMSTATE_DESTROYING:
1986 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1987 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1988 return VINF_EM_TERMINATE;
1989
1990 default:
1991 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1992 }
1993 }
1994
1995 /*
1996 * Out of memory? Since most of our fellow high priority actions may cause us
1997 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
1998 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
1999 * than us since we can terminate without allocating more memory.
2000 */
2001 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2002 {
2003 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2004 UPDATE_RC();
2005 if (rc == VINF_EM_NO_MEMORY)
2006 return rc;
2007 }
2008
2009 /*
2010 * If the virtual sync clock is still stopped, make TM restart it.
2011 */
2012 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2013 TMR3VirtualSyncFF(pVM, pVCpu);
2014
2015#ifdef DEBUG
2016 /*
2017 * Debug, pause the VM.
2018 */
2019 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2020 {
2021 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2022 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2023 return VINF_EM_SUSPEND;
2024 }
2025#endif
2026
2027 /* check that we got them all */
2028 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2029 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2030 }
2031
2032#undef UPDATE_RC
2033 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2034 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2035 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2036 return rc;
2037}
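/* Note on ordering: the post-execution chunk is serviced first, then the
   normal-priority VM-wide and per-VCPU chunks, and the high-priority
   pre-execution chunk last, so that e.g. timers fire as close as possible
   to the next round of guest execution (cf. the doc comment above). */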
2038
2039
2040/**
2041 * Check if the preset execution time cap restricts guest execution scheduling.
2042 *
2043 * @returns true if allowed, false otherwise
2044 * @param pVM Pointer to the VM.
2045 * @param pVCpu Pointer to the VMCPU.
2046 */
2047bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2048{
2049 uint64_t u64UserTime, u64KernelTime;
2050
2051 if ( pVM->uCpuExecutionCap != 100
2052 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2053 {
2054 uint64_t u64TimeNow = RTTimeMilliTS();
2055 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2056 {
2057 /* New time slice. */
2058 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2059 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2060 pVCpu->em.s.u64TimeSliceExec = 0;
2061 }
2062 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2063
2064 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2065 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2066 return false;
2067 }
2068 return true;
2069}
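/* Worked example (assuming EM_TIME_SLICE is 100 ms, as defined in
   EMInternal.h): with uCpuExecutionCap = 50, a VCPU may burn at most
   100 * 50 / 100 = 50 ms of kernel+user time per 100 ms slice. Once
   u64TimeSliceExec reaches that budget this function returns false, and the
   execution loops (e.g. emR3RemExecute() above) RTThreadSleep(5) and retry
   while virtual time keeps running. */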
2070
2071
2072/**
2073 * Execute VM.
2074 *
2075 * This function is the main loop of the VM. The emulation thread
2076 * calls this function when the VM has been successfully constructed
2077 * and we're ready to execute the VM.
2078 *
2079 * Returning from this function means that the VM is turned off or
2080 * suspended (state already saved) and deconstruction is next in line.
2081 *
2082 * All interaction from other threads is done using forced actions
2083 * and signaling of the wait object.
2084 *
2085 * @returns VBox status code; informational status codes may indicate failure.
2086 * @param pVM Pointer to the VM.
2087 * @param pVCpu Pointer to the VMCPU.
2088 */
2089VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2090{
2091 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2092 pVM,
2093 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2094 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2095 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2096 pVCpu->em.s.fForceRAW));
2097 VM_ASSERT_EMT(pVM);
2098 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2099 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2100 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2101 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2102
2103 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2104 if (rc == 0)
2105 {
2106 /*
2107 * Start the virtual time.
2108 */
2109 TMR3NotifyResume(pVM, pVCpu);
2110
2111 /*
2112 * The Outer Main Loop.
2113 */
2114 bool fFFDone = false;
2115
2116 /* Reschedule right away to start in the right state. */
2117 rc = VINF_SUCCESS;
2118
2119 /* If resuming after a pause or a state load, restore the previous
2120 state so we don't start executing code; otherwise just reschedule. */
2121 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2122 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2123 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2124 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2125 else
2126 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2127 pVCpu->em.s.cIemThenRemInstructions = 0;
2128 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2129
2130 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2131 for (;;)
2132 {
2133 /*
2134 * Before we can schedule anything (we're here because
2135 * scheduling is required) we must service any pending
2136 * forced actions to avoid any pending action causing
2137 * immediate rescheduling upon entering an inner loop.
2138 *
2139 * Do forced actions.
2140 */
2141 if ( !fFFDone
2142 && RT_SUCCESS(rc)
2143 && rc != VINF_EM_TERMINATE
2144 && rc != VINF_EM_OFF
2145 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2146 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK)))
2147 {
2148 rc = emR3ForcedActions(pVM, pVCpu, rc);
2149 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2150 if ( ( rc == VINF_EM_RESCHEDULE_REM
2151 || rc == VINF_EM_RESCHEDULE_HM)
2152 && pVCpu->em.s.fForceRAW)
2153 rc = VINF_EM_RESCHEDULE_RAW;
2154 }
2155 else if (fFFDone)
2156 fFFDone = false;
2157
2158 /*
2159 * Now what to do?
2160 */
2161 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2162 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2163 switch (rc)
2164 {
2165 /*
2166 * Keep doing what we're currently doing.
2167 */
2168 case VINF_SUCCESS:
2169 break;
2170
2171 /*
2172 * Reschedule - to raw-mode execution.
2173 */
2174 case VINF_EM_RESCHEDULE_RAW:
2175 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2176 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2177 pVCpu->em.s.enmState = EMSTATE_RAW;
2178 break;
2179
2180 /*
2181 * Reschedule - to hardware accelerated raw-mode execution.
2182 */
2183 case VINF_EM_RESCHEDULE_HM:
2184 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2185 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2186 Assert(!pVCpu->em.s.fForceRAW);
2187 pVCpu->em.s.enmState = EMSTATE_HM;
2188 break;
2189
2190 /*
2191 * Reschedule - to recompiled execution.
2192 */
2193 case VINF_EM_RESCHEDULE_REM:
2194#ifdef VBOX_WITH_FIRST_IEM_STEP
2195 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2196 if (HMIsEnabled(pVM))
2197 {
2198 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2199 enmOldState, EMSTATE_IEM_THEN_REM));
2200 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2201 {
2202 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2203 pVCpu->em.s.cIemThenRemInstructions = 0;
2204 }
2205 }
2206 else
2207 {
2208 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2209 pVCpu->em.s.enmState = EMSTATE_REM;
2210 }
2211#else
2212 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2213 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2214 pVCpu->em.s.enmState = EMSTATE_REM;
2215#endif
2216 break;
2217
2218 /*
2219 * Resume.
2220 */
2221 case VINF_EM_RESUME:
2222 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2223 /* Don't reschedule in the halted or wait for SIPI case. */
2224 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2225 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2226 {
2227 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2228 break;
2229 }
2230 /* fall through and get scheduled. */
2231
2232 /*
2233 * Reschedule.
2234 */
2235 case VINF_EM_RESCHEDULE:
2236 {
2237 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2238 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2239 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2240 pVCpu->em.s.cIemThenRemInstructions = 0;
2241 pVCpu->em.s.enmState = enmState;
2242 break;
2243 }
2244
2245 /*
2246 * Halted.
2247 */
2248 case VINF_EM_HALT:
2249 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2250 pVCpu->em.s.enmState = EMSTATE_HALTED;
2251 break;
2252
2253 /*
2254 * Switch to the wait for SIPI state (application processor only)
2255 */
2256 case VINF_EM_WAIT_SIPI:
2257 Assert(pVCpu->idCpu != 0);
2258 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2259 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2260 break;
2261
2262
2263 /*
2264 * Suspend.
2265 */
2266 case VINF_EM_SUSPEND:
2267 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2268 Assert(enmOldState != EMSTATE_SUSPENDED);
2269 pVCpu->em.s.enmPrevState = enmOldState;
2270 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2271 break;
2272
2273 /*
2274 * Reset.
2275 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2276 */
2277 case VINF_EM_RESET:
2278 {
2279 if (pVCpu->idCpu == 0)
2280 {
2281 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2282 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2283 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2284 pVCpu->em.s.cIemThenRemInstructions = 0;
2285 pVCpu->em.s.enmState = enmState;
2286 }
2287 else
2288 {
2289 /* All other VCPUs go into the wait for SIPI state. */
2290 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2291 }
2292 break;
2293 }
2294
2295 /*
2296 * Power Off.
2297 */
2298 case VINF_EM_OFF:
2299 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2300 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2301 TMR3NotifySuspend(pVM, pVCpu);
2302 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2303 return rc;
2304
2305 /*
2306 * Terminate the VM.
2307 */
2308 case VINF_EM_TERMINATE:
2309 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2310 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2311 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2312 TMR3NotifySuspend(pVM, pVCpu);
2313 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2314 return rc;
2315
2316
2317 /*
2318 * Out of memory, suspend the VM and stuff.
2319 */
2320 case VINF_EM_NO_MEMORY:
2321 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2322 Assert(enmOldState != EMSTATE_SUSPENDED);
2323 pVCpu->em.s.enmPrevState = enmOldState;
2324 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2325 TMR3NotifySuspend(pVM, pVCpu);
2326 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2327
2328 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2329 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2330 if (rc != VINF_EM_SUSPEND)
2331 {
2332 if (RT_SUCCESS_NP(rc))
2333 {
2334 AssertLogRelMsgFailed(("%Rrc\n", rc));
2335 rc = VERR_EM_INTERNAL_ERROR;
2336 }
2337 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2338 }
2339 return rc;
2340
2341 /*
2342 * Guest debug events.
2343 */
2344 case VINF_EM_DBG_STEPPED:
2345 case VINF_EM_DBG_STOP:
2346 case VINF_EM_DBG_BREAKPOINT:
2347 case VINF_EM_DBG_STEP:
2348 if (enmOldState == EMSTATE_RAW)
2349 {
2350 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2351 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2352 }
2353 else if (enmOldState == EMSTATE_HM)
2354 {
2355 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2356 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2357 }
2358 else if (enmOldState == EMSTATE_REM)
2359 {
2360 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2361 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2362 }
2363 else
2364 {
2365 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2366 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2367 }
2368 break;
2369
2370 /*
2371 * Hypervisor debug events.
2372 */
2373 case VINF_EM_DBG_HYPER_STEPPED:
2374 case VINF_EM_DBG_HYPER_BREAKPOINT:
2375 case VINF_EM_DBG_HYPER_ASSERTION:
2376 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2377 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2378 break;
2379
2380 /*
2381 * Guru meditations.
2382 */
2383 case VERR_VMM_RING0_ASSERTION:
2384 case VINF_EM_TRIPLE_FAULT:
2385 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2386 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2387 break;
2388
2389 /*
2390 * Any error code showing up here other than the ones we
2391 * know and process above is considered to be FATAL.
2392 *
2393 * Unknown warnings and informational status codes are also
2394 * included in this.
2395 */
2396 default:
2397 if (RT_SUCCESS_NP(rc))
2398 {
2399 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2400 rc = VERR_EM_INTERNAL_ERROR;
2401 }
2402 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2403 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2404 break;
2405 }
2406
2407 /*
2408 * Act on state transition.
2409 */
2410 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2411 if (enmOldState != enmNewState)
2412 {
2413 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2414
2415 /* Clear MWait flags. */
2416 if ( enmOldState == EMSTATE_HALTED
2417 && (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2418 && ( enmNewState == EMSTATE_RAW
2419 || enmNewState == EMSTATE_HM
2420 || enmNewState == EMSTATE_REM
2421 || enmNewState == EMSTATE_IEM_THEN_REM
2422 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2423 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2424 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2425 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2426 {
2427 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2428 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2429 }
2430 }
2431 else
2432 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2433
2434 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2435 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2436
2437 /*
2438 * Act on the new state.
2439 */
2440 switch (enmNewState)
2441 {
2442 /*
2443 * Execute raw.
2444 */
2445 case EMSTATE_RAW:
2446#ifdef VBOX_WITH_RAW_MODE
2447 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2448#else
2449 AssertLogRelMsgFailed(("%Rrc\n", rc));
2450 rc = VERR_EM_INTERNAL_ERROR;
2451#endif
2452 break;
2453
2454 /*
2455 * Execute hardware accelerated raw.
2456 */
2457 case EMSTATE_HM:
2458 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2459 break;
2460
2461 /*
2462 * Execute recompiled.
2463 */
2464 case EMSTATE_REM:
2465 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2466 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2467 break;
2468
2469 /*
2470 * Execute in the interpreter.
2471 */
2472 case EMSTATE_IEM:
2473 {
2474#if 0 /* For testing purposes. */
2475 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2476 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2477 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2478 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2479 rc = VINF_SUCCESS;
2480 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2481#endif
2482 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu));
2483 if (pVM->em.s.fIemExecutesAll)
2484 {
2485 Assert(rc != VINF_EM_RESCHEDULE_REM);
2486 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2487 Assert(rc != VINF_EM_RESCHEDULE_HM);
2488 }
2489 fFFDone = false;
2490 break;
2491 }
2492
2493 /*
2494 * Execute in IEM, hoping we can quickly switch back to HM
2495 * or RAW execution. If our hopes fail, we go to REM.
2496 */
2497 case EMSTATE_IEM_THEN_REM:
2498 {
2499 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2500 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2501 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2502 break;
2503 }
2504
2505 /*
2506 * Application processor execution halted until SIPI.
2507 */
2508 case EMSTATE_WAIT_SIPI:
2509 /* no break */
2510 /*
2511 * hlt - execution halted until interrupt.
2512 */
2513 case EMSTATE_HALTED:
2514 {
2515 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2516 /* MWAIT has a special extension where it's woken up when
2517 an interrupt is pending even when IF=0. */
2518 if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2519 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2520 {
2521 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2522 if ( rc == VINF_SUCCESS
2523 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
2524 {
2525 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2526 rc = VINF_EM_RESCHEDULE;
2527 }
2528 }
2529 else if (TRPMHasTrap(pVCpu))
2530 rc = VINF_EM_RESCHEDULE;
2531 else
2532 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2533
2534 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2535 break;
2536 }
2537
2538 /*
2539 * Suspended - return to VM.cpp.
2540 */
2541 case EMSTATE_SUSPENDED:
2542 TMR3NotifySuspend(pVM, pVCpu);
2543 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2544 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2545 return VINF_EM_SUSPEND;
2546
2547 /*
2548 * Debugging in the guest.
2549 */
2550 case EMSTATE_DEBUG_GUEST_RAW:
2551 case EMSTATE_DEBUG_GUEST_HM:
2552 case EMSTATE_DEBUG_GUEST_IEM:
2553 case EMSTATE_DEBUG_GUEST_REM:
2554 TMR3NotifySuspend(pVM, pVCpu);
2555 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2556 TMR3NotifyResume(pVM, pVCpu);
2557 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2558 break;
2559
2560 /*
2561 * Debugging in the hypervisor.
2562 */
2563 case EMSTATE_DEBUG_HYPER:
2564 {
2565 TMR3NotifySuspend(pVM, pVCpu);
2566 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2567
2568 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2569 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2570 if (rc != VINF_SUCCESS)
2571 {
2572 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2573 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2574 else
2575 {
2576 /* switch to guru meditation mode */
2577 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2578 VMMR3FatalDump(pVM, pVCpu, rc);
2579 }
2580 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2581 return rc;
2582 }
2583
2584 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2585 TMR3NotifyResume(pVM, pVCpu);
2586 break;
2587 }
2588
2589 /*
2590 * Guru meditation takes place in the debugger.
2591 */
2592 case EMSTATE_GURU_MEDITATION:
2593 {
2594 TMR3NotifySuspend(pVM, pVCpu);
2595 VMMR3FatalDump(pVM, pVCpu, rc);
2596 emR3Debug(pVM, pVCpu, rc);
2597 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2598 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2599 return rc;
2600 }
2601
2602 /*
2603 * The states we don't expect here.
2604 */
2605 case EMSTATE_NONE:
2606 case EMSTATE_TERMINATING:
2607 default:
2608 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2609 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2610 TMR3NotifySuspend(pVM, pVCpu);
2611 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2612 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2613 return VERR_EM_INTERNAL_ERROR;
2614 }
2615 } /* The Outer Main Loop */
2616 }
2617 else
2618 {
2619 /*
2620 * Fatal error.
2621 */
2622 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2623 TMR3NotifySuspend(pVM, pVCpu);
2624 VMMR3FatalDump(pVM, pVCpu, rc);
2625 emR3Debug(pVM, pVCpu, rc);
2626 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2627 /** @todo change the VM state! */
2628 return rc;
2629 }
2630
2631 /* (won't ever get here). */
2632 AssertFailed();
2633}
2634
2635/**
2636 * Notify EM of a suspend state change (used by FTM).
2637 *
2638 * @param pVM Pointer to the VM.
2639 */
2640VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
2641{
2642 PVMCPU pVCpu = VMMGetCpu(pVM);
2643
2644 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
2645 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
2646 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2647 return VINF_SUCCESS;
2648}
2649
2650/**
2651 * Notify EM of a resume state change (used by FTM).
2652 *
2653 * @param pVM Pointer to the VM.
2654 */
2655VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
2656{
2657 PVMCPU pVCpu = VMMGetCpu(pVM);
2658 EMSTATE enmCurState = pVCpu->em.s.enmState;
2659
2660 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
2661 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2662 pVCpu->em.s.enmPrevState = enmCurState;
2663 return VINF_SUCCESS;
2664}
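/* Note: FTM is expected to pair these two calls: EMR3NotifySuspend() stashes
   the current state in enmPrevState and parks the VCPU in EMSTATE_SUSPENDED,
   and EMR3NotifyResume() restores it, so a suspend/resume round trip puts
   the VCPU back into the execution state it was in. */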