VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp @ 60850

Last change on this file since 60850 was 60847, checked in by vboxsync, 9 years ago

IOM: New way of deferring RC+R0 I/O port writes, prepping for MMIO writes.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 123.9 KB
1/* $Id: EM.cpp 60847 2016-05-05 15:24:46Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2015 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has its own inner loop (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
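/* Illustrative sketch (added; not part of the original source): the shape of
 * the EMR3ExecuteVM() dispatch described above, heavily simplified. The real
 * loop also services forced actions, halting and state transitions:
 *
 *     for (;;)
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_RAW: rc = emR3RawExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_HM:  rc = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
 *             case EMSTATE_REM: rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             // ... remaining EMSTATE_* cases ...
 *         }
 */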
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#include <VBox/vmm/em.h>
40#include <VBox/vmm/vmm.h>
41#include <VBox/vmm/patm.h>
42#include <VBox/vmm/csam.h>
43#include <VBox/vmm/selm.h>
44#include <VBox/vmm/trpm.h>
45#include <VBox/vmm/iem.h>
46#include <VBox/vmm/iom.h>
47#include <VBox/vmm/dbgf.h>
48#include <VBox/vmm/pgm.h>
49#ifdef VBOX_WITH_REM
50# include <VBox/vmm/rem.h>
51#endif
52#ifdef VBOX_WITH_NEW_APIC
53# include <VBox/vmm/apic.h>
54#endif
55#include <VBox/vmm/tm.h>
56#include <VBox/vmm/mm.h>
57#include <VBox/vmm/ssm.h>
58#include <VBox/vmm/pdmapi.h>
59#include <VBox/vmm/pdmcritsect.h>
60#include <VBox/vmm/pdmqueue.h>
61#include <VBox/vmm/hm.h>
62#include <VBox/vmm/patm.h>
63#include "EMInternal.h"
64#include <VBox/vmm/vm.h>
65#include <VBox/vmm/uvm.h>
66#include <VBox/vmm/cpumdis.h>
67#include <VBox/dis.h>
68#include <VBox/disopcode.h>
69#include "VMMTracing.h"
70
71#include <iprt/asm.h>
72#include <iprt/string.h>
73#include <iprt/stream.h>
74#include <iprt/thread.h>
75
76
77/*********************************************************************************************************************************
78* Defined Constants And Macros *
79*********************************************************************************************************************************/
80#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
81#define EM_NOTIFY_HM
82#endif
83
84
85/*********************************************************************************************************************************
86* Internal Functions *
87*********************************************************************************************************************************/
88static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
89static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
90#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
91static const char *emR3GetStateName(EMSTATE enmState);
92#endif
93static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
94static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
95static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
96int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
97
98
99/**
100 * Initializes the EM.
101 *
102 * @returns VBox status code.
103 * @param pVM The cross context VM structure.
104 */
105VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
106{
107 LogFlow(("EMR3Init\n"));
108 /*
109 * Assert alignment and sizes.
110 */
111 AssertCompileMemberAlignment(VM, em.s, 32);
112 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
113 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
114
115 /*
116 * Init the structure.
117 */
118 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
119 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
120 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
121
122 bool fEnabled;
123 int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
124 AssertLogRelRCReturn(rc, rc);
125 pVM->fRecompileUser = !fEnabled;
126
127 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
128 AssertLogRelRCReturn(rc, rc);
129 pVM->fRecompileSupervisor = !fEnabled;
130
131#ifdef VBOX_WITH_RAW_RING1
132 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
133 AssertLogRelRCReturn(rc, rc);
134#else
135 pVM->fRawRing1Enabled = false; /* Disabled by default. */
136#endif
137
138 rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
139 AssertLogRelRCReturn(rc, rc);
140
141 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
142 AssertLogRelRCReturn(rc, rc);
143 pVM->em.s.fGuruOnTripleFault = !fEnabled;
144 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
145 {
146 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
147 pVM->em.s.fGuruOnTripleFault = true;
148 }
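/* Added note: TripleFaultReset is a CFGM key under /EM; on a typical setup it
 * can be set from the host side via the extradata mapping (assumed syntax):
 *     VBoxManage setextradata <vmname> "VBoxInternal/EM/TripleFaultReset" 1
 */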
149
150 Log(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
151 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
152
153#ifdef VBOX_WITH_REM
154 /*
155 * Initialize the REM critical section.
156 */
157 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
158 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
159 AssertRCReturn(rc, rc);
160#endif
161
162 /*
163 * Saved state.
164 */
165 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
166 NULL, NULL, NULL,
167 NULL, emR3Save, NULL,
168 NULL, emR3Load, NULL);
169 if (RT_FAILURE(rc))
170 return rc;
171
172 for (VMCPUID i = 0; i < pVM->cCpus; i++)
173 {
174 PVMCPU pVCpu = &pVM->aCpus[i];
175
176 pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
177 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
178 pVCpu->em.s.fForceRAW = false;
179
180 pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
181#ifdef VBOX_WITH_RAW_MODE
182 if (!HMIsEnabled(pVM))
183 {
184 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
185 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
186 }
187#endif
188
189 /* Force reset of the time slice. */
190 pVCpu->em.s.u64TimeSliceStart = 0;
191
192# define EM_REG_COUNTER(a, b, c) \
193 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
194 AssertRC(rc);
195
196# define EM_REG_COUNTER_USED(a, b, c) \
197 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
198 AssertRC(rc);
199
200# define EM_REG_PROFILE(a, b, c) \
201 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
202 AssertRC(rc);
203
204# define EM_REG_PROFILE_ADV(a, b, c) \
205 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
206 AssertRC(rc);
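/* Example (added for illustration): for the statistics registered below,
 *     EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "desc");
 * expands per VCPU index i to
 *     rc = STAMR3RegisterF(pVM, &pStats->StatRZInterpretFailed, STAMTYPE_COUNTER,
 *                          STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, "desc",
 *                          "/EM/CPU%d/RZ/Interpret/Failed", i);
 *     AssertRC(rc);
 * i.e. the "%d" in each sample name is replaced with the CPU index.
 */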
207
208 /*
209 * Statistics.
210 */
211#ifdef VBOX_WITH_STATISTICS
212 PEMSTATS pStats;
213 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
214 if (RT_FAILURE(rc))
215 return rc;
216
217 pVCpu->em.s.pStatsR3 = pStats;
218 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
219 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
220
221 EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
222 EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");
223
224 EM_REG_COUNTER(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
225 EM_REG_COUNTER(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");
226
227 EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
228 EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
229 EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
230 EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
231 EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
232 EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
233 EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
234 EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
235 EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
236 EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
237 EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
238 EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
239 EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
240 EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
241 EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
242 EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
243 EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
244 EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
245 EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
246 EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
247 EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
248 EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
249 EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
250 EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
251 EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
252 EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
253 EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
254 EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
255 EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
256 EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
257 EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
258 EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
259 EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
260 EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
261 EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
262 EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
263 EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
264 EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
265 EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
266 EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
267 EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
268 EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
269 EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
270 EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
271 EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
272 EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
273 EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
274 EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
275 EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
276 EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
277 EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
278 EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
279 EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
280 EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
281 EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
282 EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
283 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
284 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
285 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
286 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
287 EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
288 EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
289 EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
290 EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
291 EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
292 EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
293 EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
294 EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
295 EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
296 EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
297 EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
298 EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
299 EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
300 EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
301
302 EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
303 EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");
304
305 EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
306 EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
307 EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
308 EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
309 EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
310 EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
311 EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
312 EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
313 EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
314 EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
315 EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
316 EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
317 EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
318 EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
319 EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
320 EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
321 EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
322 EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
323 EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
324 EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
325 EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
326 EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
327 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
328 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
329 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
330 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
331 EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
332 EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
333 EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
334 EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
335 EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
336 EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
337 EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
338 EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
339 EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
340 EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
341 EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
342 EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
343 EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
344 EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
345 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
346 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
347 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
348 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
349 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
350 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
351 EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
352 EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
353 EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
354 EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
355 EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
356 EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
357
358 EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
359 EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
360 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
361 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
362 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
363 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
364 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
365 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
366 EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
367 EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
368 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
369 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
370 EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
371 EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
372 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
373 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
374 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
375 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
376 EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
377 EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
378 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
379 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
380 EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
381 EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
382 EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
383 EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
384 EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
385 EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
386
387 EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
388 EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
389 EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of instruction prefixes.");
390 EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of instruction prefixes.");
391
392 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
393 EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
394 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
395 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
396 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
397 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
398 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
399 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
400 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
401 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
402 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
403 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
404 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
405 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
406 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
407 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
408 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
409 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
410 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
411 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
412 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
413 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
414 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
415 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
416 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
417 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
418
419 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
420 pVCpu->em.s.pCliStatTree = 0;
421
422 /* these should be considered for release statistics. */
423 EM_REG_PROFILE(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
424 EM_REG_PROFILE(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
425 EM_REG_PROFILE(&pVCpu->em.s.StatHmEntry, "/PROF/CPU%d/EM/HmEnter", "Profiling Hardware Accelerated Mode entry overhead.");
426 EM_REG_PROFILE(&pVCpu->em.s.StatHmExec, "/PROF/CPU%d/EM/HmExec", "Profiling Hardware Accelerated Mode execution.");
427 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
428 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
429 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
430 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
431 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
432 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
433 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
434 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
435
436#endif /* VBOX_WITH_STATISTICS */
437
438 EM_REG_PROFILE(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
439 EM_REG_PROFILE(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
440 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
441 EM_REG_PROFILE(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
442 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
443
444 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
445 }
446
447 emR3InitDbg(pVM);
448 return VINF_SUCCESS;
449}
450
451
452/**
453 * Applies relocations to data and code managed by this
454 * component. This function will be called at init and
455 * whenever the VMM needs to relocate itself inside the GC.
456 *
457 * @param pVM The cross context VM structure.
458 */
459VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
460{
461 LogFlow(("EMR3Relocate\n"));
462 for (VMCPUID i = 0; i < pVM->cCpus; i++)
463 {
464 PVMCPU pVCpu = &pVM->aCpus[i];
465 if (pVCpu->em.s.pStatsR3)
466 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
467 }
468}
469
470
471/**
472 * Reset the EM state for a CPU.
473 *
474 * Called by EMR3Reset and hot plugging.
475 *
476 * @param pVCpu The cross context virtual CPU structure.
477 */
478VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
479{
480 pVCpu->em.s.fForceRAW = false;
481
482 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
483 out of the HALTED state here so that enmPrevState doesn't end up as
484 HALTED when EMR3Execute returns. */
485 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
486 {
487 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
488 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
489 }
490}
491
492
493/**
494 * Reset notification.
495 *
496 * @param pVM The cross context VM structure.
497 */
498VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
499{
500 Log(("EMR3Reset: \n"));
501 for (VMCPUID i = 0; i < pVM->cCpus; i++)
502 EMR3ResetCpu(&pVM->aCpus[i]);
503}
504
505
506/**
507 * Terminates the EM.
508 *
509 * Termination means cleaning up and freeing all resources; the VM
510 * itself is at this point powered off or suspended.
511 *
512 * @returns VBox status code.
513 * @param pVM The cross context VM structure.
514 */
515VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
516{
517 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
518
519#ifdef VBOX_WITH_REM
520 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
521#endif
522 return VINF_SUCCESS;
523}
524
525
526/**
527 * Execute state save operation.
528 *
529 * @returns VBox status code.
530 * @param pVM The cross context VM structure.
531 * @param pSSM SSM operation handle.
532 */
533static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
534{
535 for (VMCPUID i = 0; i < pVM->cCpus; i++)
536 {
537 PVMCPU pVCpu = &pVM->aCpus[i];
538
539 int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
540 AssertRCReturn(rc, rc);
541
542 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
543 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
544 rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
545 AssertRCReturn(rc, rc);
546
547 /* Save mwait state. */
548 rc = SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
549 AssertRCReturn(rc, rc);
550 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
551 AssertRCReturn(rc, rc);
552 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
553 AssertRCReturn(rc, rc);
554 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
555 AssertRCReturn(rc, rc);
556 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
557 AssertRCReturn(rc, rc);
558 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
559 AssertRCReturn(rc, rc);
560 }
561 return VINF_SUCCESS;
562}
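/* Added note: per VCPU the "em" saved-state unit written above consists of,
 * in order:
 *     bool fForceRAW, uint32_t enmPrevState, uint32_t MWait.fWait,
 *     then five GC pointers (uMWaitRAX, uMWaitRCX, uMonitorRAX,
 *     uMonitorRCX, uMonitorRDX).
 * emR3Load() below must consume the fields in exactly this order. */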
563
564
565/**
566 * Execute state load operation.
567 *
568 * @returns VBox status code.
569 * @param pVM The cross context VM structure.
570 * @param pSSM SSM operation handle.
571 * @param uVersion Data layout version.
572 * @param uPass The data pass.
573 */
574static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
575{
576 /*
577 * Validate version.
578 */
579 if ( uVersion > EM_SAVED_STATE_VERSION
580 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
581 {
582 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
583 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
584 }
585 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
586
587 /*
588 * Load the saved state.
589 */
590 for (VMCPUID i = 0; i < pVM->cCpus; i++)
591 {
592 PVMCPU pVCpu = &pVM->aCpus[i];
593
594 int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
595 if (RT_FAILURE(rc))
596 pVCpu->em.s.fForceRAW = false;
597 AssertRCReturn(rc, rc);
598
599 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
600 {
601 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
602 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
603 AssertRCReturn(rc, rc);
604 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
605
606 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
607 }
608 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
609 {
610 /* Load mwait state. */
611 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
612 AssertRCReturn(rc, rc);
613 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
614 AssertRCReturn(rc, rc);
615 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
616 AssertRCReturn(rc, rc);
617 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
618 AssertRCReturn(rc, rc);
619 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
620 AssertRCReturn(rc, rc);
621 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
622 AssertRCReturn(rc, rc);
623 }
624
625 Assert(!pVCpu->em.s.pCliStatTree);
626 }
627 return VINF_SUCCESS;
628}
629
630
631/**
632 * Argument packet for emR3SetExecutionPolicy.
633 */
634struct EMR3SETEXECPOLICYARGS
635{
636 EMEXECPOLICY enmPolicy;
637 bool fEnforce;
638};
639
640
641/**
642 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
643 */
644static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
645{
646 /*
647 * Only the first CPU changes the variables.
648 */
649 if (pVCpu->idCpu == 0)
650 {
651 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
652 switch (pArgs->enmPolicy)
653 {
654 case EMEXECPOLICY_RECOMPILE_RING0:
655 pVM->fRecompileSupervisor = pArgs->fEnforce;
656 break;
657 case EMEXECPOLICY_RECOMPILE_RING3:
658 pVM->fRecompileUser = pArgs->fEnforce;
659 break;
660 case EMEXECPOLICY_IEM_ALL:
661 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
662 break;
663 default:
664 AssertFailedReturn(VERR_INVALID_PARAMETER);
665 }
666 Log(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
667 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
668 }
669
670 /*
671 * Force rescheduling if in RAW, HM, IEM, or REM.
672 */
673 return pVCpu->em.s.enmState == EMSTATE_RAW
674 || pVCpu->em.s.enmState == EMSTATE_HM
675 || pVCpu->em.s.enmState == EMSTATE_IEM
676 || pVCpu->em.s.enmState == EMSTATE_REM
677 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
678 ? VINF_EM_RESCHEDULE
679 : VINF_SUCCESS;
680}
681
682
683/**
684 * Changes an execution scheduling policy parameter.
685 *
686 * This is used to enable or disable raw-mode / hardware-virtualization
687 * execution of user and supervisor code.
688 *
689 * @returns VINF_SUCCESS on success.
690 * @returns VINF_EM_RESCHEDULE if rescheduling might be required.
691 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
692 *
693 * @param pUVM The user mode VM handle.
694 * @param enmPolicy The scheduling policy to change.
695 * @param fEnforce Whether to enforce the policy or not.
696 */
697VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
698{
699 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
700 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
701 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
702
703 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
704 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
705}
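/* Usage sketch (added; error handling and surrounding context are assumed,
 * not taken from the original file). Forcing all guest code through IEM,
 * e.g. while hunting an emulation bug:
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *     // VINF_EM_RESCHEDULE here just means the EMTs will pick a new mode.
 */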
706
707
708/**
709 * Queries an execution scheduling policy parameter.
710 *
711 * @returns VBox status code
712 * @param pUVM The user mode VM handle.
713 * @param enmPolicy The scheduling policy to query.
714 * @param pfEnforced Where to return the current value.
715 */
716VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
717{
718 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
719 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
720 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
721 PVM pVM = pUVM->pVM;
722 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
723
724 /* No need to bother EMTs with a query. */
725 switch (enmPolicy)
726 {
727 case EMEXECPOLICY_RECOMPILE_RING0:
728 *pfEnforced = pVM->fRecompileSupervisor;
729 break;
730 case EMEXECPOLICY_RECOMPILE_RING3:
731 *pfEnforced = pVM->fRecompileUser;
732 break;
733 case EMEXECPOLICY_IEM_ALL:
734 *pfEnforced = pVM->em.s.fIemExecutesAll;
735 break;
736 default:
737 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
738 }
739
740 return VINF_SUCCESS;
741}
742
743
744/**
745 * Raise a fatal error.
746 *
747 * Safely terminates the VM with a full state report. This function
748 * will naturally never return.
749 *
750 * @param pVCpu The cross context virtual CPU structure.
751 * @param rc VBox status code.
752 */
753VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
754{
755 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
756 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
757 AssertReleaseMsgFailed(("longjmp returned!\n"));
758}
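/* Sketch (added): the jump buffer used above is armed with setjmp() by the
 * EMT outer loop (in EMR3ExecuteVM(), outside this excerpt); the pattern is
 * roughly:
 *     int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
 *     if (!rc)
 *         // ... run the execution loop; EMR3FatalError() longjmps back ...
 * so a non-zero rc here is the status code passed to EMR3FatalError(). */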
759
760
761#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
762/**
763 * Gets the EM state name.
764 *
765 * @returns Pointer to a read-only state name.
766 * @param enmState The state.
767 */
768static const char *emR3GetStateName(EMSTATE enmState)
769{
770 switch (enmState)
771 {
772 case EMSTATE_NONE: return "EMSTATE_NONE";
773 case EMSTATE_RAW: return "EMSTATE_RAW";
774 case EMSTATE_HM: return "EMSTATE_HM";
775 case EMSTATE_IEM: return "EMSTATE_IEM";
776 case EMSTATE_REM: return "EMSTATE_REM";
777 case EMSTATE_HALTED: return "EMSTATE_HALTED";
778 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
779 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
780 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
781 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
782 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
783 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
784 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
785 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
786 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
787 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
788 default: return "Unknown!";
789 }
790}
791#endif /* LOG_ENABLED || VBOX_STRICT */
792
793
794/**
795 * Debug loop.
796 *
797 * @returns VBox status code for EM.
798 * @param pVM The cross context VM structure.
799 * @param pVCpu The cross context virtual CPU structure.
800 * @param rc Current EM VBox status code.
801 */
802static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
803{
804 for (;;)
805 {
806 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
807 const VBOXSTRICTRC rcLast = rc;
808
809 /*
810 * Debug related RC.
811 */
812 switch (VBOXSTRICTRC_VAL(rc))
813 {
814 /*
815 * Single step an instruction.
816 */
817 case VINF_EM_DBG_STEP:
818 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
819 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
820 || pVCpu->em.s.fForceRAW /* paranoia */)
821#ifdef VBOX_WITH_RAW_MODE
822 rc = emR3RawStep(pVM, pVCpu);
823#else
824 AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
825#endif
826 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
827 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
828#ifdef VBOX_WITH_REM
829 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
830 rc = emR3RemStep(pVM, pVCpu);
831#endif
832 else
833 {
834 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
835 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
836 rc = VINF_EM_DBG_STEPPED;
837 }
838 break;
839
840 /*
841 * Simple events: stepped, breakpoint, stop/assertion.
842 */
843 case VINF_EM_DBG_STEPPED:
844 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
845 break;
846
847 case VINF_EM_DBG_BREAKPOINT:
848 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
849 break;
850
851 case VINF_EM_DBG_STOP:
852 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
853 break;
854
855 case VINF_EM_DBG_EVENT:
856 rc = DBGFR3EventHandlePending(pVM, pVCpu);
857 break;
858
859 case VINF_EM_DBG_HYPER_STEPPED:
860 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
861 break;
862
863 case VINF_EM_DBG_HYPER_BREAKPOINT:
864 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
865 break;
866
867 case VINF_EM_DBG_HYPER_ASSERTION:
868 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
869 RTLogFlush(NULL);
870 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
871 break;
872
873 /*
874 * Guru meditation.
875 */
876 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
877 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
878 break;
879 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
880 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
881 break;
882 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
883 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
884 break;
885
886 default: /** @todo don't use default for guru, but make special error codes! */
887 {
888 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
889 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
890 break;
891 }
892 }
893
894 /*
895 * Process the result.
896 */
897 do
898 {
899 switch (VBOXSTRICTRC_VAL(rc))
900 {
901 /*
902 * Continue the debugging loop.
903 */
904 case VINF_EM_DBG_STEP:
905 case VINF_EM_DBG_STOP:
906 case VINF_EM_DBG_EVENT:
907 case VINF_EM_DBG_STEPPED:
908 case VINF_EM_DBG_BREAKPOINT:
909 case VINF_EM_DBG_HYPER_STEPPED:
910 case VINF_EM_DBG_HYPER_BREAKPOINT:
911 case VINF_EM_DBG_HYPER_ASSERTION:
912 break;
913
914 /*
915 * Resuming execution (in some form) has to be done here if we got
916 * a hypervisor debug event.
917 */
918 case VINF_SUCCESS:
919 case VINF_EM_RESUME:
920 case VINF_EM_SUSPEND:
921 case VINF_EM_RESCHEDULE:
922 case VINF_EM_RESCHEDULE_RAW:
923 case VINF_EM_RESCHEDULE_REM:
924 case VINF_EM_HALT:
925 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
926 {
927#ifdef VBOX_WITH_RAW_MODE
928 rc = emR3RawResumeHyper(pVM, pVCpu);
929 if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
930 continue;
931#else
932 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
933#endif
934 }
935 if (rc == VINF_SUCCESS)
936 rc = VINF_EM_RESCHEDULE;
937 return rc;
938
939 /*
940 * The debugger isn't attached.
941 * We'll simply turn the thing off since that's the easiest thing to do.
942 */
943 case VERR_DBGF_NOT_ATTACHED:
944 switch (VBOXSTRICTRC_VAL(rcLast))
945 {
946 case VINF_EM_DBG_HYPER_STEPPED:
947 case VINF_EM_DBG_HYPER_BREAKPOINT:
948 case VINF_EM_DBG_HYPER_ASSERTION:
949 case VERR_TRPM_PANIC:
950 case VERR_TRPM_DONT_PANIC:
951 case VERR_VMM_RING0_ASSERTION:
952 case VERR_VMM_HYPER_CR3_MISMATCH:
953 case VERR_VMM_RING3_CALL_DISABLED:
954 return rcLast;
955 }
956 return VINF_EM_OFF;
957
958 /*
959 * Status codes terminating the VM in one or another sense.
960 */
961 case VINF_EM_TERMINATE:
962 case VINF_EM_OFF:
963 case VINF_EM_RESET:
964 case VINF_EM_NO_MEMORY:
965 case VINF_EM_RAW_STALE_SELECTOR:
966 case VINF_EM_RAW_IRET_TRAP:
967 case VERR_TRPM_PANIC:
968 case VERR_TRPM_DONT_PANIC:
969 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
970 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
971 case VERR_VMM_RING0_ASSERTION:
972 case VERR_VMM_HYPER_CR3_MISMATCH:
973 case VERR_VMM_RING3_CALL_DISABLED:
974 case VERR_INTERNAL_ERROR:
975 case VERR_INTERNAL_ERROR_2:
976 case VERR_INTERNAL_ERROR_3:
977 case VERR_INTERNAL_ERROR_4:
978 case VERR_INTERNAL_ERROR_5:
979 case VERR_IPE_UNEXPECTED_STATUS:
980 case VERR_IPE_UNEXPECTED_INFO_STATUS:
981 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
982 return rc;
983
984 /*
985 * The rest is unexpected, and will keep us here.
986 */
987 default:
988 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
989 break;
990 }
991 } while (false);
992 } /* debug for ever */
993}
994
995
996/**
997 * Steps recompiled code.
998 *
999 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1000 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1001 *
1002 * @param pVM The cross context VM structure.
1003 * @param pVCpu The cross context virtual CPU structure.
1004 */
1005static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1006{
1007 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1008
1009#ifdef VBOX_WITH_REM
1010 EMRemLock(pVM);
1011
1012 /*
1013 * Switch to REM, step instruction, switch back.
1014 */
1015 int rc = REMR3State(pVM, pVCpu);
1016 if (RT_SUCCESS(rc))
1017 {
1018 rc = REMR3Step(pVM, pVCpu);
1019 REMR3StateBack(pVM, pVCpu);
1020 }
1021 EMRemUnlock(pVM);
1022
1023#else
1024 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1025#endif
1026
1027 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1028 return rc;
1029}
1030
1031
1032/**
1033 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
1034 * critical section.
1035 *
1036 * @returns false - new fInREMState value.
1037 * @param pVM The cross context VM structure.
1038 * @param pVCpu The cross context virtual CPU structure.
1039 */
1040DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1041{
1042#ifdef VBOX_WITH_REM
1043 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1044 REMR3StateBack(pVM, pVCpu);
1045 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1046
1047 EMRemUnlock(pVM);
1048#endif
1049 return false;
1050}
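/* Added note: callers track REM ownership in a local fInREMState flag; true
 * means this EMT owns the REM critical section and REM holds the current CPU
 * state. Assigning this helper's return value (always false) to that flag,
 * as emR3RemExecute() does below, is the idiom for leaving that state. */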
1051
1052
1053/**
1054 * Executes recompiled code.
1055 *
1056 * This function contains the recompiler version of the inner
1057 * execution loop (the outer loop being in EMR3ExecuteVM()).
1058 *
1059 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1060 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1061 *
1062 * @param pVM The cross context VM structure.
1063 * @param pVCpu The cross context virtual CPU structure.
1064 * @param pfFFDone Where to store an indicator telling whether or not
1065 * FFs were done before returning.
1066 *
1067 */
1068static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1069{
1070#ifdef LOG_ENABLED
1071 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1072 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1073
1074 if (pCtx->eflags.Bits.u1VM)
1075 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
1076 else
1077 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
1078#endif
1079 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1080
1081#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1082 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1083 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1084 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1085#endif
1086
1087 /*
1088 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1089 * or until REM suggests raw-mode execution.
1090 */
1091 *pfFFDone = false;
1092#ifdef VBOX_WITH_REM
1093 bool fInREMState = false;
1094#endif
1095 int rc = VINF_SUCCESS;
1096 for (;;)
1097 {
1098#ifdef VBOX_WITH_REM
1099 /*
1100 * Lock REM and update the state if not already in sync.
1101 *
1102 * Note! Big lock, but you are not supposed to own any lock when
1103 * coming in here.
1104 */
1105 if (!fInREMState)
1106 {
1107 EMRemLock(pVM);
1108 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1109
1110 /* Flush the recompiler translation blocks if the VCPU has changed,
1111 also force a full CPU state resync. */
1112 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1113 {
1114 REMFlushTBs(pVM);
1115 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1116 }
1117 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1118
1119 rc = REMR3State(pVM, pVCpu);
1120
1121 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1122 if (RT_FAILURE(rc))
1123 break;
1124 fInREMState = true;
1125
1126 /*
1127 * We might have missed the raising of VMREQ, TIMER and some other
1128 * important FFs while we were busy switching the state. So, check again.
1129 */
1130 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1131 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1132 {
1133 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1134 goto l_REMDoForcedActions;
1135 }
1136 }
1137#endif
1138
1139 /*
1140 * Execute REM.
1141 */
1142 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1143 {
1144 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1145#ifdef VBOX_WITH_REM
1146 rc = REMR3Run(pVM, pVCpu);
1147#else
1148 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu));
1149#endif
1150 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1151 }
1152 else
1153 {
1154 /* Give up this time slice; virtual time continues */
1155 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1156 RTThreadSleep(5);
1157 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1158 rc = VINF_SUCCESS;
1159 }
1160
1161 /*
1162 * Deal with high priority post execution FFs before doing anything
1163 * else. Sync back the state and leave the lock to be on the safe side.
1164 */
1165 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1166 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1167 {
1168#ifdef VBOX_WITH_REM
1169 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1170#endif
1171 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1172 }
1173
1174 /*
1175 * Process the returned status code.
1176 */
1177 if (rc != VINF_SUCCESS)
1178 {
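 /* EM status codes are handed back to the outer loop in EMR3ExecuteVM,
    which decides how to reschedule. */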
1179 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1180 break;
1181 if (rc != VINF_REM_INTERRUPED_FF)
1182 {
1183 /*
1184 * Anything which is not known to us means an internal error
1185 * and the termination of the VM!
1186 */
1187 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1188 break;
1189 }
1190 }
1191
1192
1193 /*
1194 * Check and execute forced actions.
1195 *
1196 * Sync back the VM state and leave the lock before calling any of
1197 * these; you never know what's going to happen here.
1198 */
1199#ifdef VBOX_HIGH_RES_TIMERS_HACK
1200 TMTimerPollVoid(pVM, pVCpu);
1201#endif
1202 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1203 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1204 || VMCPU_FF_IS_PENDING(pVCpu,
1205 VMCPU_FF_ALL_REM_MASK
1206 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1207 {
1208#ifdef VBOX_WITH_REM
1209l_REMDoForcedActions:
1210 if (fInREMState)
1211 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1212#endif
1213 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1214 rc = emR3ForcedActions(pVM, pVCpu, rc);
1215 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1216 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1217 if ( rc != VINF_SUCCESS
1218 && rc != VINF_EM_RESCHEDULE_REM)
1219 {
1220 *pfFFDone = true;
1221 break;
1222 }
1223 }
1224
1225 } /* The Inner Loop, recompiled execution mode version. */
1226
1227
1228#ifdef VBOX_WITH_REM
1229 /*
1230 * Returning. Sync back the VM state if required.
1231 */
1232 if (fInREMState)
1233 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1234#endif
1235
1236 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1237 return rc;
1238}
1239
1240
1241#ifdef DEBUG
1242
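/**
 * Single steps recompiled code a number of times (debug builds only).
 *
 * @returns VINF_EM_RESCHEDULE.
 * @param pVM The cross context VM structure.
 * @param pVCpu The cross context virtual CPU structure.
 * @param cIterations The maximum number of instructions to step.
 */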
1243int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1244{
1245 EMSTATE enmOldState = pVCpu->em.s.enmState;
1246
1247 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1248
1249 Log(("Single step BEGIN:\n"));
1250 for (uint32_t i = 0; i < cIterations; i++)
1251 {
1252 DBGFR3PrgStep(pVCpu);
1253 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1254 emR3RemStep(pVM, pVCpu);
1255 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1256 break;
1257 }
1258 Log(("Single step END:\n"));
1259 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1260 pVCpu->em.s.enmState = enmOldState;
1261 return VINF_EM_RESCHEDULE;
1262}
1263
1264#endif /* DEBUG */
1265
1266
1267/**
1268 * Try to execute the problematic code in IEM first, then fall back on REM if there
1269 * is too much of it or if IEM doesn't implement something.
1270 *
1271 * @returns Strict VBox status code from IEMExecLots.
1272 * @param pVM The cross context VM structure.
1273 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1274 * @param pfFFDone Force flags done indicator.
1275 *
1276 * @thread EMT(pVCpu)
1277 */
1278static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1279{
1280 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1281 *pfFFDone = false;
1282
1283 /*
1284 * Execute in IEM for a while.
1285 */
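 /* Note: cIemThenRemInstructions is only reset when (re-)entering the
    IEM_THEN_REM state, so the 1024 instruction budget below is cumulative
    across calls rather than per call. */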
1286 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1287 {
1288 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu);
1289 if (rcStrict != VINF_SUCCESS)
1290 {
1291 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1292 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1293 break;
1294
1295 pVCpu->em.s.cIemThenRemInstructions++;
1296 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1297 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1298 return rcStrict;
1299 }
1300 pVCpu->em.s.cIemThenRemInstructions++;
1301
1302 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1303 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1304 {
1305 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1306 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1307 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1308 pVCpu->em.s.enmState = enmNewState;
1309 return VINF_SUCCESS;
1310 }
1311
1312 /*
1313 * Check for pending actions.
1314 */
1315 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1316 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1317 return VINF_SUCCESS;
1318 }
1319
1320 /*
1321 * Switch to REM.
1322 */
1323 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1324 pVCpu->em.s.enmState = EMSTATE_REM;
1325 return VINF_SUCCESS;
1326}
1327
1328
1329/**
1330 * Decides whether to execute RAW, HWACC or REM.
1331 *
1332 * @returns new EM state
1333 * @param pVM The cross context VM structure.
1334 * @param pVCpu The cross context virtual CPU structure.
1335 * @param pCtx Pointer to the guest CPU context.
1336 */
1337EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1338{
1339 /*
1340 * When forcing raw-mode execution, things are simple.
1341 */
1342 if (pVCpu->em.s.fForceRAW)
1343 return EMSTATE_RAW;
1344
1345 /*
1346 * We stay in the wait for SIPI state unless explicitly told otherwise.
1347 */
1348 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1349 return EMSTATE_WAIT_SIPI;
1350
1351 /*
1352 * Execute everything in IEM?
1353 */
1354 if (pVM->em.s.fIemExecutesAll)
1355 return EMSTATE_IEM;
1356
1357 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1358 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1359 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1360
1361 X86EFLAGS EFlags = pCtx->eflags;
1362 if (HMIsEnabled(pVM))
1363 {
1364 /*
1365 * Hardware accelerated raw-mode:
1366 */
1367 if ( EMIsHwVirtExecutionEnabled(pVM)
1368 && HMR3CanExecuteGuest(pVM, pCtx))
1369 return EMSTATE_HM;
1370
1371 /*
1372 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1373 * turns off monitoring features essential for raw mode!
1374 */
1375 return EMSTATE_IEM_THEN_REM;
1376 }
1377
1378 /*
1379 * Standard raw-mode:
1380 *
1381 * Here we only support 16 & 32-bit protected mode ring-3 code with no I/O privileges,
1382 * or 32-bit protected mode ring-0 code.
1383 *
1384 * The tests are ordered by the likelihood of being true during normal execution.
1385 */
1386 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1387 {
1388 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1389 return EMSTATE_REM;
1390 }
1391
1392# ifndef VBOX_RAW_V86
1393 if (EFlags.u32 & X86_EFL_VM) {
1394 Log2(("raw mode refused: VM_MASK\n"));
1395 return EMSTATE_REM;
1396 }
1397# endif
1398
1399 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1400 uint32_t u32CR0 = pCtx->cr0;
1401 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1402 {
1403 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1404 return EMSTATE_REM;
1405 }
1406
1407 if (pCtx->cr4 & X86_CR4_PAE)
1408 {
1409 uint32_t u32Dummy, u32Features;
1410
1411 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1412 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1413 return EMSTATE_REM;
1414 }
1415
1416 unsigned uSS = pCtx->ss.Sel;
1417 if ( pCtx->eflags.Bits.u1VM
1418 || (uSS & X86_SEL_RPL) == 3)
1419 {
1420 if (!EMIsRawRing3Enabled(pVM))
1421 return EMSTATE_REM;
1422
1423 if (!(EFlags.u32 & X86_EFL_IF))
1424 {
1425 Log2(("raw mode refused: IF (RawR3)\n"));
1426 return EMSTATE_REM;
1427 }
1428
1429 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1430 {
1431 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1432 return EMSTATE_REM;
1433 }
1434 }
1435 else
1436 {
1437 if (!EMIsRawRing0Enabled(pVM))
1438 return EMSTATE_REM;
1439
1440 if (EMIsRawRing1Enabled(pVM))
1441 {
1442 /* Only ring 0 and 1 supervisor code. */
1443 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1444 {
1445 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1446 return EMSTATE_REM;
1447 }
1448 }
1449 /* Only ring 0 supervisor code. */
1450 else if ((uSS & X86_SEL_RPL) != 0)
1451 {
1452 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1453 return EMSTATE_REM;
1454 }
1455
1456 // Let's start with pure 32 bits ring 0 code first
1457 /** @todo What's pure 32-bit mode? flat? */
1458 if ( !(pCtx->ss.Attr.n.u1DefBig)
1459 || !(pCtx->cs.Attr.n.u1DefBig))
1460 {
1461 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1462 return EMSTATE_REM;
1463 }
1464
1465 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1466 if (!(u32CR0 & X86_CR0_WP))
1467 {
1468 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1469 return EMSTATE_REM;
1470 }
1471
1472# ifdef VBOX_WITH_RAW_MODE
1473 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1474 {
1475 Log2(("raw r0 mode forced: patch code\n"));
1476# ifdef VBOX_WITH_SAFE_STR
1477 Assert(pCtx->tr.Sel);
1478# endif
1479 return EMSTATE_RAW;
1480 }
1481# endif /* VBOX_WITH_RAW_MODE */
1482
1483# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1484 if (!(EFlags.u32 & X86_EFL_IF))
1485 {
1486 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1487 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1488 return EMSTATE_REM;
1489 }
1490# endif
1491
1492# ifndef VBOX_WITH_RAW_RING1
1493 /** @todo still necessary??? */
1494 if (EFlags.Bits.u2IOPL != 0)
1495 {
1496 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1497 return EMSTATE_REM;
1498 }
1499# endif
1500 }
1501
1502 /*
1503 * Stale hidden selectors mean raw-mode is unsafe (being very careful).
1504 */
1505 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1506 {
1507 Log2(("raw mode refused: stale CS\n"));
1508 return EMSTATE_REM;
1509 }
1510 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1511 {
1512 Log2(("raw mode refused: stale SS\n"));
1513 return EMSTATE_REM;
1514 }
1515 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1516 {
1517 Log2(("raw mode refused: stale DS\n"));
1518 return EMSTATE_REM;
1519 }
1520 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1521 {
1522 Log2(("raw mode refused: stale ES\n"));
1523 return EMSTATE_REM;
1524 }
1525 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1526 {
1527 Log2(("raw mode refused: stale FS\n"));
1528 return EMSTATE_REM;
1529 }
1530 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1531 {
1532 Log2(("raw mode refused: stale GS\n"));
1533 return EMSTATE_REM;
1534 }
1535
1536# ifdef VBOX_WITH_SAFE_STR
1537 if (pCtx->tr.Sel == 0)
1538 {
1539 Log(("Raw mode refused -> TR=0\n"));
1540 return EMSTATE_REM;
1541 }
1542# endif
1543
1544 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1545 return EMSTATE_RAW;
1546}
1547
1548
1549/**
1550 * Executes all high priority post execution force actions.
1551 *
1552 * @returns rc or a fatal status code.
1553 *
1554 * @param pVM The cross context VM structure.
1555 * @param pVCpu The cross context virtual CPU structure.
1556 * @param rc The current rc.
1557 */
1558int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1559{
1560 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1561
1562 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1563 PDMCritSectBothFF(pVCpu);
1564
1565 /* Update CR3 (Nested Paging case for HM). */
1566 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1567 {
1568 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1569 if (RT_FAILURE(rc2))
1570 return rc2;
1571 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1572 }
1573
1574 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1575 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1576 {
1577 if (CPUMIsGuestInPAEMode(pVCpu))
1578 {
1579 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1580 AssertPtr(pPdpes);
1581
1582 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1583 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1584 }
1585 else
1586 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1587 }
1588
1589 /* IEM has pending work (typically memory write after INS instruction). */
1590 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
1591 rc = VBOXSTRICTRC_TODO(IEMR3DoPendingAction(pVCpu, rc));
1592
1593 /* IOM has pending work (committing an I/O or MMIO write). */
1594 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
1595 rc = VBOXSTRICTRC_TODO(IOMR3ProcessForceFlag(pVM, pVCpu, rc));
1596
1597#ifdef VBOX_WITH_RAW_MODE
1598 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1599 CSAMR3DoPendingAction(pVM, pVCpu);
1600#endif
1601
1602 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1603 {
1604 if ( rc > VINF_EM_NO_MEMORY
1605 && rc <= VINF_EM_LAST)
1606 rc = VINF_EM_NO_MEMORY;
1607 }
1608
1609 return rc;
1610}
1611
1612
1613/**
1614 * Executes all pending forced actions.
1615 *
1616 * Forced actions can cause execution delays and execution
1617 * rescheduling. The first we deal with using action priority, so
1618 * that for instance pending timers aren't scheduled and run until
1619 * right before execution. The rescheduling we deal with using
1620 * return codes. The same goes for VM termination, only in that case
1621 * we exit everything.
1622 *
1623 * @returns VBox status code of equal or greater importance/severity than rc.
1624 * The most important ones are: VINF_EM_RESCHEDULE,
1625 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1626 *
1627 * @param pVM The cross context VM structure.
1628 * @param pVCpu The cross context virtual CPU structure.
1629 * @param rc The current rc.
1630 *
1631 */
1632int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1633{
1634 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1635#ifdef VBOX_STRICT
1636 int rcIrq = VINF_SUCCESS;
1637#endif
1638 int rc2;
1639#define UPDATE_RC() \
1640 do { \
1641 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1642 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1643 break; \
1644 if (!rc || rc2 < rc) \
1645 rc = rc2; \
1646 } while (0)
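 /* Note: UPDATE_RC relies on the EM status codes being ordered by priority,
    lower (non-zero) values taking precedence; e.g. a pending VINF_EM_SUSPEND
    overrides a VINF_EM_RESCHEDULE (assuming the usual err.h ordering). An rc
    that is already an error status is never overridden. */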
1647 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1648
1649 /*
1650 * Post execution chunk first.
1651 */
1652 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1653 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1654 {
1655 /*
1656 * EMT Rendezvous (must be serviced before termination).
1657 */
1658 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1659 {
1660 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1661 UPDATE_RC();
1662 /** @todo HACK ALERT! The following test is to make sure EM+TM
1663 * thinks the VM is stopped/reset before the next VM state change
1664 * is made. We need a better solution for this, or at least make it
1665 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1666 * VINF_EM_SUSPEND). */
1667 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1668 {
1669 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1670 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1671 return rc;
1672 }
1673 }
1674
1675 /*
1676 * State change request (cleared by vmR3SetStateLocked).
1677 */
1678 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1679 {
1680 VMSTATE enmState = VMR3GetState(pVM);
1681 switch (enmState)
1682 {
1683 case VMSTATE_FATAL_ERROR:
1684 case VMSTATE_FATAL_ERROR_LS:
1685 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1686 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1687 return VINF_EM_SUSPEND;
1688
1689 case VMSTATE_DESTROYING:
1690 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1691 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1692 return VINF_EM_TERMINATE;
1693
1694 default:
1695 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1696 }
1697 }
1698
1699 /*
1700 * Debugger Facility polling.
1701 */
1702 if (VM_FF_IS_PENDING(pVM, VM_FF_DBGF))
1703 {
1704 rc2 = DBGFR3VMMForcedAction(pVM);
1705 UPDATE_RC();
1706 }
1707
1708 /*
1709 * Postponed reset request.
1710 */
1711 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1712 {
1713 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1714 UPDATE_RC();
1715 }
1716
1717#ifdef VBOX_WITH_RAW_MODE
1718 /*
1719 * CSAM page scanning.
1720 */
1721 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1722 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1723 {
1724 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1725
1726 /** @todo Check for 16 or 32 bits code! (D bit in the code selector) */
1727 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1728
1729 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
1730 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1731 }
1732#endif
1733
1734 /*
1735 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1736 */
1737 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1738 {
1739 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1740 UPDATE_RC();
1741 if (rc == VINF_EM_NO_MEMORY)
1742 return rc;
1743 }
1744
1745 /* check that we got them all */
1746 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1747 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0));
1748 }
1749
1750 /*
1751 * Normal priority then.
1752 * (Executed in no particular order.)
1753 */
1754 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1755 {
1756 /*
1757 * PDM Queues are pending.
1758 */
1759 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1760 PDMR3QueueFlushAll(pVM);
1761
1762 /*
1763 * PDM DMA transfers are pending.
1764 */
1765 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1766 PDMR3DmaRun(pVM);
1767
1768 /*
1769 * EMT Rendezvous (make sure they are handled before the requests).
1770 */
1771 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1772 {
1773 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1774 UPDATE_RC();
1775 /** @todo HACK ALERT! The following test is to make sure EM+TM
1776 * thinks the VM is stopped/reset before the next VM state change
1777 * is made. We need a better solution for this, or at least make it
1778 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1779 * VINF_EM_SUSPEND). */
1780 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1781 {
1782 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1783 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1784 return rc;
1785 }
1786 }
1787
1788 /*
1789 * Requests from other threads.
1790 */
1791 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1792 {
1793 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1794 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1795 {
1796 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1797 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1798 return rc2;
1799 }
1800 UPDATE_RC();
1801 /** @todo HACK ALERT! The following test is to make sure EM+TM
1802 * thinks the VM is stopped/reset before the next VM state change
1803 * is made. We need a better solution for this, or at least make it
1804 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1805 * VINF_EM_SUSPEND). */
1806 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1807 {
1808 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1809 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1810 return rc;
1811 }
1812 }
1813
1814#ifdef VBOX_WITH_REM
1815 /* Replay the handler notification changes. */
1816 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1817 {
1818 /* Try not to cause deadlocks. */
1819 if ( pVM->cCpus == 1
1820 || ( !PGMIsLockOwner(pVM)
1821 && !IOMIsLockWriteOwner(pVM))
1822 )
1823 {
1824 EMRemLock(pVM);
1825 REMR3ReplayHandlerNotifications(pVM);
1826 EMRemUnlock(pVM);
1827 }
1828 }
1829#endif
1830
1831 /* check that we got them all */
1832 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1833 }
1834
1835 /*
1836 * Normal priority then. (per-VCPU)
1837 * (Executed in no particular order.)
1838 */
1839 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1840 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1841 {
1842 /*
1843 * Requests from other threads.
1844 */
1845 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
1846 {
1847 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1848 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1849 {
1850 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1851 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1852 return rc2;
1853 }
1854 UPDATE_RC();
1855 /** @todo HACK ALERT! The following test is to make sure EM+TM
1856 * thinks the VM is stopped/reset before the next VM state change
1857 * is made. We need a better solution for this, or at least make it
1858 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1859 * VINF_EM_SUSPEND). */
1860 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1861 {
1862 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1863 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1864 return rc;
1865 }
1866 }
1867
1868 /*
1869 * Forced unhalting of EMT.
1870 */
1871 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
1872 {
1873 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
1874 if (rc == VINF_EM_HALT)
1875 rc = VINF_EM_RESCHEDULE;
1876 else
1877 {
1878 rc2 = VINF_EM_RESCHEDULE;
1879 UPDATE_RC();
1880 }
1881 }
1882
1883 /* check that we got them all */
1884 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST | VMCPU_FF_UNHALT)));
1885 }
1886
1887 /*
1888 * High priority pre execution chunk last.
1889 * (Executed in ascending priority order.)
1890 */
1891 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1892 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1893 {
1894 /*
1895 * Timers before interrupts.
1896 */
1897 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
1898 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1899 TMR3TimerQueuesDo(pVM);
1900
1901#ifdef VBOX_WITH_NEW_APIC
1902 /*
1903 * Pick up asynchronously posted interrupts into the APIC.
1904 */
1905 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1906 APICUpdatePendingInterrupts(pVCpu);
1907#endif
1908
1909 /*
1910 * The instruction following an emulated STI should *always* be executed!
1911 *
1912 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
1913 * the eip is the same as the inhibited instr address. Before we
1914 * are able to execute this instruction in raw mode (iret to
1915 * guest code) an external interrupt might force a world switch
1916 * again. Possibly allowing a guest interrupt to be dispatched
1917 * in the process. This could break the guest. Sounds very
1918 * unlikely, but such timing-sensitive problems are not as rare as
1919 * you might think.
1920 */
1921 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1922 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1923 {
1924 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1925 {
1926 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1927 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1928 }
1929 else
1930 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1931 }
1932
1933 /*
1934 * Interrupts.
1935 */
1936 bool fWakeupPending = false;
1937 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1938 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1939 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1940 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
1941#ifdef VBOX_WITH_RAW_MODE
1942 && PATMAreInterruptsEnabled(pVM)
1943#else
1944 && (pVCpu->em.s.pCtx->eflags.u32 & X86_EFL_IF)
1945#endif
1946 && !HMR3IsEventPending(pVCpu))
1947 {
1948 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1949 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1950 {
1951 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1952 /** @todo this really isn't nice, should properly handle this */
1953 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1954 if (pVM->em.s.fIemExecutesAll && (rc2 == VINF_EM_RESCHEDULE_REM || rc2 == VINF_EM_RESCHEDULE_HM || rc2 == VINF_EM_RESCHEDULE_RAW))
1955 rc2 = VINF_EM_RESCHEDULE;
1956#ifdef VBOX_STRICT
1957 rcIrq = rc2;
1958#endif
1959 UPDATE_RC();
1960 /* Reschedule required: We must not miss the wakeup below! */
1961 fWakeupPending = true;
1962 }
1963#ifdef VBOX_WITH_REM
1964 /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
1965 else if (REMR3QueryPendingInterrupt(pVM, pVCpu) != REM_NO_PENDING_IRQ)
1966 {
1967 Log2(("REMR3QueryPendingInterrupt -> %#x\n", REMR3QueryPendingInterrupt(pVM, pVCpu)));
1968 rc2 = VINF_EM_RESCHEDULE_REM;
1969 UPDATE_RC();
1970 }
1971#endif
1972 }
1973
1974 /*
1975 * Allocate handy pages.
1976 */
1977 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1978 {
1979 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1980 UPDATE_RC();
1981 }
1982
1983 /*
1984 * Debugger Facility request.
1985 */
1986 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_DBGF, VM_FF_PGM_NO_MEMORY))
1987 {
1988 rc2 = DBGFR3VMMForcedAction(pVM);
1989 UPDATE_RC();
1990 }
1991
1992 /*
1993 * EMT Rendezvous (must be serviced before termination).
1994 */
1995 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
1996 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1997 {
1998 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1999 UPDATE_RC();
2000 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2001 * stopped/reset before the next VM state change is made. We need a better
2002 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2003 * && rc <= VINF_EM_SUSPEND). */
2004 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2005 {
2006 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2007 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2008 return rc;
2009 }
2010 }
2011
2012 /*
2013 * State change request (cleared by vmR3SetStateLocked).
2014 */
2015 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2016 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2017 {
2018 VMSTATE enmState = VMR3GetState(pVM);
2019 switch (enmState)
2020 {
2021 case VMSTATE_FATAL_ERROR:
2022 case VMSTATE_FATAL_ERROR_LS:
2023 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2024 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2025 return VINF_EM_SUSPEND;
2026
2027 case VMSTATE_DESTROYING:
2028 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2029 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2030 return VINF_EM_TERMINATE;
2031
2032 default:
2033 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2034 }
2035 }
2036
2037 /*
2038 * Out of memory? Since most of our fellow high priority actions may cause us
2039 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2040 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2041 * than us since we can terminate without allocating more memory.
2042 */
2043 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2044 {
2045 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2046 UPDATE_RC();
2047 if (rc == VINF_EM_NO_MEMORY)
2048 return rc;
2049 }
2050
2051 /*
2052 * If the virtual sync clock is still stopped, make TM restart it.
2053 */
2054 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2055 TMR3VirtualSyncFF(pVM, pVCpu);
2056
2057#ifdef DEBUG
2058 /*
2059 * Debug, pause the VM.
2060 */
2061 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2062 {
2063 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2064 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2065 return VINF_EM_SUSPEND;
2066 }
2067#endif
2068
2069 /* check that we got them all */
2070 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2071 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2072 }
2073
2074#undef UPDATE_RC
2075 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2076 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2077 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2078 return rc;
2079}
2080
2081
2082/**
2083 * Check if the preset execution time cap restricts guest execution scheduling.
2084 *
2085 * @returns true if allowed, false otherwise
2086 * @param pVM The cross context VM structure.
2087 * @param pVCpu The cross context virtual CPU structure.
2088 */
2089bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2090{
2091 uint64_t u64UserTime, u64KernelTime;
2092
2093 if ( pVM->uCpuExecutionCap != 100
2094 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2095 {
2096 uint64_t u64TimeNow = RTTimeMilliTS();
2097 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2098 {
2099 /* New time slice. */
2100 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2101 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2102 pVCpu->em.s.u64TimeSliceExec = 0;
2103 }
2104 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
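 /* Illustration (EM_TIME_SLICE value assumed): with a 100 ms slice and
    uCpuExecutionCap=50, the check below throttles the EMT once it has
    consumed 50 ms of CPU time within the current slice. */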
2105
2106 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2107 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2108 return false;
2109 }
2110 return true;
2111}
2112
2113
2114/**
2115 * Execute VM.
2116 *
2117 * This function is the main loop of the VM. The emulation thread
2118 * calls this function when the VM has been successfully constructed
2119 * and we're ready to execute the VM.
2120 *
2121 * Returning from this function means that the VM is turned off or
2122 * suspended (state already saved) and deconstruction is next in line.
2123 *
2124 * All interaction from other threads is done using forced actions
2125 * and signaling of the wait object.
2126 *
2127 * @returns VBox status code; informational status codes may indicate failure.
2128 * @param pVM The cross context VM structure.
2129 * @param pVCpu The cross context virtual CPU structure.
2130 */
2131VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2132{
2133 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2134 pVM,
2135 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2136 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2137 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2138 pVCpu->em.s.fForceRAW));
2139 VM_ASSERT_EMT(pVM);
2140 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2141 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2142 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2143 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2144
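 /* The direct setjmp return is zero; fatal errors longjmp back here with a
    non-zero status code and take the error path at the bottom of this
    function. */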
2145 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2146 if (rc == 0)
2147 {
2148 /*
2149 * Start the virtual time.
2150 */
2151 TMR3NotifyResume(pVM, pVCpu);
2152
2153 /*
2154 * The Outer Main Loop.
2155 */
2156 bool fFFDone = false;
2157
2158 /* Reschedule right away to start in the right state. */
2159 rc = VINF_SUCCESS;
2160
2161 /* If resuming after a pause or a state load, restore the previous
2162 state or else we'll start executing code. Else, just reschedule. */
2163 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2164 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2165 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2166 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2167 else
2168 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2169 pVCpu->em.s.cIemThenRemInstructions = 0;
2170 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2171
2172 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2173 for (;;)
2174 {
2175 /*
2176 * Before we can schedule anything (we're here because
2177 * scheduling is required) we must service any pending
2178 * forced actions to avoid any pending action causing
2179 * immediate rescheduling upon entering an inner loop.
2180 *
2181 * Do forced actions.
2182 */
2183 if ( !fFFDone
2184 && RT_SUCCESS(rc)
2185 && rc != VINF_EM_TERMINATE
2186 && rc != VINF_EM_OFF
2187 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2188 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK)))
2189 {
2190 rc = emR3ForcedActions(pVM, pVCpu, rc);
2191 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2192 if ( ( rc == VINF_EM_RESCHEDULE_REM
2193 || rc == VINF_EM_RESCHEDULE_HM)
2194 && pVCpu->em.s.fForceRAW)
2195 rc = VINF_EM_RESCHEDULE_RAW;
2196 }
2197 else if (fFFDone)
2198 fFFDone = false;
2199
2200 /*
2201 * Now what to do?
2202 */
2203 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2204 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2205 switch (rc)
2206 {
2207 /*
2208 * Keep doing what we're currently doing.
2209 */
2210 case VINF_SUCCESS:
2211 break;
2212
2213 /*
2214 * Reschedule - to raw-mode execution.
2215 */
2216 case VINF_EM_RESCHEDULE_RAW:
2217 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2218 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2219 pVCpu->em.s.enmState = EMSTATE_RAW;
2220 break;
2221
2222 /*
2223 * Reschedule - to hardware accelerated raw-mode execution.
2224 */
2225 case VINF_EM_RESCHEDULE_HM:
2226 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2227 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2228 Assert(!pVCpu->em.s.fForceRAW);
2229 pVCpu->em.s.enmState = EMSTATE_HM;
2230 break;
2231
2232 /*
2233 * Reschedule - to recompiled execution.
2234 */
2235 case VINF_EM_RESCHEDULE_REM:
2236 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2237 if (HMIsEnabled(pVM))
2238 {
2239 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2240 enmOldState, EMSTATE_IEM_THEN_REM));
2241 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2242 {
2243 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2244 pVCpu->em.s.cIemThenRemInstructions = 0;
2245 }
2246 }
2247 else
2248 {
2249 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2250 pVCpu->em.s.enmState = EMSTATE_REM;
2251 }
2252 break;
2253
2254 /*
2255 * Resume.
2256 */
2257 case VINF_EM_RESUME:
2258 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2259 /* Don't reschedule in the halted or wait for SIPI case. */
2260 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2261 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2262 {
2263 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2264 break;
2265 }
2266 /* fall through and get scheduled. */
2267
2268 /*
2269 * Reschedule.
2270 */
2271 case VINF_EM_RESCHEDULE:
2272 {
2273 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2274 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2275 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2276 pVCpu->em.s.cIemThenRemInstructions = 0;
2277 pVCpu->em.s.enmState = enmState;
2278 break;
2279 }
2280
2281 /*
2282 * Halted.
2283 */
2284 case VINF_EM_HALT:
2285 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2286 pVCpu->em.s.enmState = EMSTATE_HALTED;
2287 break;
2288
2289 /*
2290 * Switch to the wait for SIPI state (application processor only)
2291 */
2292 case VINF_EM_WAIT_SIPI:
2293 Assert(pVCpu->idCpu != 0);
2294 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2295 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2296 break;
2297
2298
2299 /*
2300 * Suspend.
2301 */
2302 case VINF_EM_SUSPEND:
2303 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2304 Assert(enmOldState != EMSTATE_SUSPENDED);
2305 pVCpu->em.s.enmPrevState = enmOldState;
2306 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2307 break;
2308
2309 /*
2310 * Reset.
2311 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2312 */
2313 case VINF_EM_RESET:
2314 {
2315 if (pVCpu->idCpu == 0)
2316 {
2317 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2318 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2319 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2320 pVCpu->em.s.cIemThenRemInstructions = 0;
2321 pVCpu->em.s.enmState = enmState;
2322 }
2323 else
2324 {
2325 /* All other VCPUs go into the wait for SIPI state. */
2326 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2327 }
2328 break;
2329 }
2330
2331 /*
2332 * Power Off.
2333 */
2334 case VINF_EM_OFF:
2335 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2336 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2337 TMR3NotifySuspend(pVM, pVCpu);
2338 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2339 return rc;
2340
2341 /*
2342 * Terminate the VM.
2343 */
2344 case VINF_EM_TERMINATE:
2345 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2346 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2347 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2348 TMR3NotifySuspend(pVM, pVCpu);
2349 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2350 return rc;
2351
2352
2353 /*
2354 * Out of memory, suspend the VM and stuff.
2355 */
2356 case VINF_EM_NO_MEMORY:
2357 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2358 Assert(enmOldState != EMSTATE_SUSPENDED);
2359 pVCpu->em.s.enmPrevState = enmOldState;
2360 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2361 TMR3NotifySuspend(pVM, pVCpu);
2362 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2363
2364 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2365 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2366 if (rc != VINF_EM_SUSPEND)
2367 {
2368 if (RT_SUCCESS_NP(rc))
2369 {
2370 AssertLogRelMsgFailed(("%Rrc\n", rc));
2371 rc = VERR_EM_INTERNAL_ERROR;
2372 }
2373 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2374 }
2375 return rc;
2376
2377 /*
2378 * Guest debug events.
2379 */
2380 case VINF_EM_DBG_STEPPED:
2381 case VINF_EM_DBG_STOP:
2382 case VINF_EM_DBG_EVENT:
2383 case VINF_EM_DBG_BREAKPOINT:
2384 case VINF_EM_DBG_STEP:
2385 if (enmOldState == EMSTATE_RAW)
2386 {
2387 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2388 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2389 }
2390 else if (enmOldState == EMSTATE_HM)
2391 {
2392 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2393 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2394 }
2395 else if (enmOldState == EMSTATE_REM)
2396 {
2397 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2398 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2399 }
2400 else
2401 {
2402 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2403 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2404 }
2405 break;
2406
2407 /*
2408 * Hypervisor debug events.
2409 */
2410 case VINF_EM_DBG_HYPER_STEPPED:
2411 case VINF_EM_DBG_HYPER_BREAKPOINT:
2412 case VINF_EM_DBG_HYPER_ASSERTION:
2413 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2414 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2415 break;
2416
2417 /*
2418 * Triple fault.
2419 */
2420 case VINF_EM_TRIPLE_FAULT:
2421 if (!pVM->em.s.fGuruOnTripleFault)
2422 {
2423 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2424 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2425 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2426 continue;
2427 }
2428 /* Else fall through and trigger a guru. */
2429 case VERR_VMM_RING0_ASSERTION:
2430 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2431 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2432 break;
2433
2434 /*
2435 * Any error code showing up here other than the ones we
2436 * know and process above are considered to be FATAL.
2437 *
2438 * Unknown warnings and informational status codes are also
2439 * included in this.
2440 */
2441 default:
2442 if (RT_SUCCESS_NP(rc))
2443 {
2444 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2445 rc = VERR_EM_INTERNAL_ERROR;
2446 }
2447 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2448 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2449 break;
2450 }
2451
2452 /*
2453 * Act on state transition.
2454 */
2455 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2456 if (enmOldState != enmNewState)
2457 {
2458 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2459
2460 /* Clear MWait flags. */
2461 if ( enmOldState == EMSTATE_HALTED
2462 && (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2463 && ( enmNewState == EMSTATE_RAW
2464 || enmNewState == EMSTATE_HM
2465 || enmNewState == EMSTATE_REM
2466 || enmNewState == EMSTATE_IEM_THEN_REM
2467 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2468 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2469 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2470 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2471 {
2472 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2473 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2474 }
2475 }
2476 else
2477 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2478
2479 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2480 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2481
2482 /*
2483 * Act on the new state.
2484 */
2485 switch (enmNewState)
2486 {
2487 /*
2488 * Execute raw.
2489 */
2490 case EMSTATE_RAW:
2491#ifdef VBOX_WITH_RAW_MODE
2492 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2493#else
2494 AssertLogRelMsgFailed(("%Rrc\n", rc));
2495 rc = VERR_EM_INTERNAL_ERROR;
2496#endif
2497 break;
2498
2499 /*
2500 * Execute hardware accelerated raw.
2501 */
2502 case EMSTATE_HM:
2503 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2504 break;
2505
2506 /*
2507 * Execute recompiled.
2508 */
2509 case EMSTATE_REM:
2510 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2511 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2512 break;
2513
2514 /*
2515 * Execute in the interpreter.
2516 */
2517 case EMSTATE_IEM:
2518 {
2519#if 0 /* For testing purposes. */
2520 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2521 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2522 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2523 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2524 rc = VINF_SUCCESS;
2525 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2526#endif
2527 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu));
2528 if (pVM->em.s.fIemExecutesAll)
2529 {
2530 Assert(rc != VINF_EM_RESCHEDULE_REM);
2531 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2532 Assert(rc != VINF_EM_RESCHEDULE_HM);
2533 }
2534 fFFDone = false;
2535 break;
2536 }
2537
2538 /*
2539 * Execute in IEM, hoping we can quickly switch back to HM
2540 * or RAW execution. If our hopes fail, we go to REM.
2541 */
2542 case EMSTATE_IEM_THEN_REM:
2543 {
2544 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2545 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2546 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2547 break;
2548 }
2549
2550 /*
2551 * Application processor execution halted until SIPI.
2552 */
2553 case EMSTATE_WAIT_SIPI:
2554 /* no break */
2555 /*
2556 * hlt - execution halted until interrupt.
2557 */
2558 case EMSTATE_HALTED:
2559 {
2560 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2561 /* If HM (or someone else) stores a pending interrupt in
2562 TRPM, it must be dispatched ASAP without any halting.
2563 Anything pending in TRPM has been accepted and the CPU
2564 should already be in the right state to receive it. */
2565 if (TRPMHasTrap(pVCpu))
2566 rc = VINF_EM_RESCHEDULE;
2567 /* MWAIT has a special extension where it's woken up when
2568 an interrupt is pending even when IF=0. */
2569 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2570 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2571 {
2572 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2573 if (rc == VINF_SUCCESS)
2574 {
2575#ifdef VBOX_WITH_NEW_APIC
2576 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2577 APICUpdatePendingInterrupts(pVCpu);
2578#endif
2579 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2580 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2581 {
2582 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2583 rc = VINF_EM_RESCHEDULE;
2584 }
2585 }
2586 }
2587 else
2588 {
2589 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2590 if ( rc == VINF_SUCCESS
2591 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2592 {
2593 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2594 rc = VINF_EM_RESCHEDULE;
2595 }
2596 }
2597
2598 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2599 break;
2600 }
2601
2602 /*
2603 * Suspended - return to VM.cpp.
2604 */
2605 case EMSTATE_SUSPENDED:
2606 TMR3NotifySuspend(pVM, pVCpu);
2607 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2608 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2609 return VINF_EM_SUSPEND;
2610
2611 /*
2612 * Debugging in the guest.
2613 */
2614 case EMSTATE_DEBUG_GUEST_RAW:
2615 case EMSTATE_DEBUG_GUEST_HM:
2616 case EMSTATE_DEBUG_GUEST_IEM:
2617 case EMSTATE_DEBUG_GUEST_REM:
2618 TMR3NotifySuspend(pVM, pVCpu);
2619 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2620 TMR3NotifyResume(pVM, pVCpu);
2621 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2622 break;
2623
2624 /*
2625 * Debugging in the hypervisor.
2626 */
2627 case EMSTATE_DEBUG_HYPER:
2628 {
2629 TMR3NotifySuspend(pVM, pVCpu);
2630 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2631
2632 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2633 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2634 if (rc != VINF_SUCCESS)
2635 {
2636 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2637 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2638 else
2639 {
2640 /* switch to guru meditation mode */
2641 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2642 VMMR3FatalDump(pVM, pVCpu, rc);
2643 }
2644 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2645 return rc;
2646 }
2647
2648 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2649 TMR3NotifyResume(pVM, pVCpu);
2650 break;
2651 }
2652
2653 /*
2654 * Guru meditation takes place in the debugger.
2655 */
2656 case EMSTATE_GURU_MEDITATION:
2657 {
2658 TMR3NotifySuspend(pVM, pVCpu);
2659 VMMR3FatalDump(pVM, pVCpu, rc);
2660 emR3Debug(pVM, pVCpu, rc);
2661 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2662 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2663 return rc;
2664 }
2665
2666 /*
2667 * The states we don't expect here.
2668 */
2669 case EMSTATE_NONE:
2670 case EMSTATE_TERMINATING:
2671 default:
2672 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2673 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2674 TMR3NotifySuspend(pVM, pVCpu);
2675 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2676 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2677 return VERR_EM_INTERNAL_ERROR;
2678 }
2679 } /* The Outer Main Loop */
2680 }
2681 else
2682 {
2683 /*
2684 * Fatal error.
2685 */
2686 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2687 TMR3NotifySuspend(pVM, pVCpu);
2688 VMMR3FatalDump(pVM, pVCpu, rc);
2689 emR3Debug(pVM, pVCpu, rc);
2690 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2691 /** @todo change the VM state! */
2692 return rc;
2693 }
2694
2695 /* (won't ever get here). */
2696 AssertFailed();
2697}
2698
2699/**
2700 * Notify EM of a state change (used by FTM)
2701 *
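 * @returns VINF_SUCCESS.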
2702 * @param pVM The cross context VM structure.
2703 */
2704VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
2705{
2706 PVMCPU pVCpu = VMMGetCpu(pVM);
2707
2708 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
2709 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
2710 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2711 return VINF_SUCCESS;
2712}
2713
2714/**
2715 * Notify EM of a state change (used by FTM)
2716 *
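 * @returns VINF_SUCCESS.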
2717 * @param pVM The cross context VM structure.
2718 */
2719VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
2720{
2721 PVMCPU pVCpu = VMMGetCpu(pVM);
2722 EMSTATE enmCurState = pVCpu->em.s.enmState;
2723
2724 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
2725 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2726 pVCpu->em.s.enmPrevState = enmCurState;
2727 return VINF_SUCCESS;
2728}