VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp @ 62287

Last change on this file since 62287 was 62287, checked in by vboxsync, 9 years ago:

REM: Killed the REMR3NotifyPendingInterrupt interface. Misguided hack for something TRPM should be doing and couldn't because REMR3State wasn't doing its job wrt TRPM_HARDWARE_INT.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 124.9 KB
/* $Id: EM.cpp 62287 2016-07-15 18:44:49Z vboxsync $ */
/** @file
 * EM - Execution Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2015 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_em         EM - The Execution Monitor / Manager
 *
 * The Execution Monitor/Manager is responsible for running the VM, scheduling
 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
 * Interpreted), and keeping the CPU states in sync. The function
 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
 * emR3RemExecute).
 *
 * The interpreted execution is only used to avoid switching between
 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
 * The interpretation is thus implemented as part of EM.
 *
 * @see grp_em
 */
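
/* A minimal illustrative sketch (comment only, not compiled): conceptually
 * the outer loop in EMR3ExecuteVM() keeps dispatching to the inner loop that
 * matches the current execution state until a terminal status is returned.
 * Simplified here, with FF processing and error handling omitted:
 *
 *     for (;;)
 *     {
 *         bool fFFDone = false;
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_RAW: rc = emR3RawExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_HM:  rc = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
 *             case EMSTATE_REM: rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             default:          rc = VERR_EM_INTERNAL_ERROR;               break;
 *         }
 *     }
 */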


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#include <VBox/vmm/em.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#ifdef VBOX_WITH_NEW_APIC
# include <VBox/vmm/apic.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmqueue.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/patm.h>
#include "EMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include "VMMTracing.h"

#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/stream.h>
#include <iprt/thread.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
#define EM_NOTIFY_HM
#endif


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
static const char *emR3GetStateName(EMSTATE enmState);
#endif
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);


/**
 * Initializes the EM.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
{
    LogFlow(("EMR3Init\n"));
    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, em.s, 32);
    AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
    AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));

    /*
     * Init the structure.
     */
    pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
    PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
    PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");

    bool fEnabled;
    int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileUser = !fEnabled;

    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileSupervisor = !fEnabled;

#ifdef VBOX_WITH_RAW_RING1
    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
    AssertLogRelRCReturn(rc, rc);
#else
    pVM->fRawRing1Enabled = false; /* Disabled by default. */
#endif

    rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
    AssertLogRelRCReturn(rc, rc);
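    /* Note: /EM/IemExecutesAll routes every bit of guest code through the
       interpreter (IEM). On an installed VirtualBox this CFGM key can
       typically be set from the host side via extradata, e.g. (an assumed
       invocation, not verified here):
           VBoxManage setextradata <vmname> "VBoxInternal/EM/IemExecutesAll" 1 */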

    rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
    AssertLogRelRCReturn(rc, rc);
    pVM->em.s.fGuruOnTripleFault = !fEnabled;
    if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
    {
        LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
        pVM->em.s.fGuruOnTripleFault = true;
    }

    Log(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
         pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));

#ifdef VBOX_WITH_REM
    /*
     * Initialize the REM critical section.
     */
    AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
    rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
    AssertRCReturn(rc, rc);
#endif

    /*
     * Saved state.
     */
    rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
                               NULL, NULL, NULL,
                               NULL, emR3Save, NULL,
                               NULL, emR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->em.s.enmState     = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
        pVCpu->em.s.enmPrevState = EMSTATE_NONE;
        pVCpu->em.s.fForceRAW    = false;

        pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
#ifdef VBOX_WITH_RAW_MODE
        if (!HMIsEnabled(pVM))
        {
            pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
            AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
        }
#endif

        /* Force reset of the time slice. */
        pVCpu->em.s.u64TimeSliceStart = 0;

# define EM_REG_COUNTER(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_COUNTER_USED(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE_ADV(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

        /*
         * Statistics.
         */
#ifdef VBOX_WITH_STATISTICS
        PEMSTATS pStats;
        rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->em.s.pStatsR3 = pStats;
        pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
        pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);

        EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
        EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");

        EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
        EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");

        EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
        EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of an instruction prefix.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of an instruction prefix.");

        EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
        EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
        EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");

        EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
        pVCpu->em.s.pCliStatTree = 0;

        /* These should be considered for release statistics. */
        EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
        EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHmEntry, "/PROF/CPU%d/EM/HmEnter", "Profiling Hardware Accelerated Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHmExec, "/PROF/CPU%d/EM/HmExec", "Profiling Hardware Accelerated Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");

#endif /* VBOX_WITH_STATISTICS */

        EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
        EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
        EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");

        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
    }

    emR3InitDbg(pVM);
    return VINF_SUCCESS;
}


/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
{
    LogFlow(("EMR3Relocate\n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        if (pVCpu->em.s.pStatsR3)
            pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
    }
}


/**
 * Reset the EM state for a CPU.
 *
 * Called by EMR3Reset and hot plugging.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
{
    pVCpu->em.s.fForceRAW = false;

    /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
       out of the HALTED state here so that enmPrevState doesn't end up as
       HALTED when EMR3Execute returns. */
    if (pVCpu->em.s.enmState == EMSTATE_HALTED)
    {
        Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
        pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
    }
}


/**
 * Reset notification.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
{
    Log(("EMR3Reset: \n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        EMR3ResetCpu(&pVM->aCpus[i]);
}


/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources;
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
{
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

#ifdef VBOX_WITH_REM
    PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
#endif
    return VINF_SUCCESS;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
        AssertRCReturn(rc, rc);

        Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
        Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
        AssertRCReturn(rc, rc);

        /* Save mwait state. */
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}
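
/* For reference, the per-VCPU record written above is, in order (a sketch of
 * the layout as implemented by emR3Save, not a normative spec):
 *      bool      fForceRAW
 *      uint32_t  enmPrevState
 *      uint32_t  MWait.fWait
 *      RTGCPTR   MWait.uMWaitRAX,   MWait.uMWaitRCX
 *      RTGCPTR   MWait.uMonitorRAX, MWait.uMonitorRCX, MWait.uMonitorRDX
 * emR3Load below must consume exactly the same sequence, gated on uVersion. */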


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    /*
     * Validate version.
     */
    if (    uVersion > EM_SAVED_STATE_VERSION
        ||  uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
    {
        AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Load the saved state.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;
        AssertRCReturn(rc, rc);

        if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
        {
            AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
            rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
            AssertRCReturn(rc, rc);
            Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);

            pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
        }
        if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
        {
            /* Load mwait state. */
            rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
            AssertRCReturn(rc, rc);
        }

        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return VINF_SUCCESS;
}


/**
 * Argument packet for emR3SetExecutionPolicy.
 */
struct EMR3SETEXECPOLICYARGS
{
    EMEXECPOLICY    enmPolicy;
    bool            fEnforce;
};


/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
 */
static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /*
     * Only the first CPU changes the variables.
     */
    if (pVCpu->idCpu == 0)
    {
        struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
        switch (pArgs->enmPolicy)
        {
            case EMEXECPOLICY_RECOMPILE_RING0:
                pVM->fRecompileSupervisor = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_RECOMPILE_RING3:
                pVM->fRecompileUser = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_IEM_ALL:
                pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
                break;
            default:
                AssertFailedReturn(VERR_INVALID_PARAMETER);
        }
        Log(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
             pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
    }

    /*
     * Force rescheduling if in RAW, HM, IEM, or REM.
     */
    return    pVCpu->em.s.enmState == EMSTATE_RAW
           || pVCpu->em.s.enmState == EMSTATE_HM
           || pVCpu->em.s.enmState == EMSTATE_IEM
           || pVCpu->em.s.enmState == EMSTATE_REM
           || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
         ? VINF_EM_RESCHEDULE
         : VINF_SUCCESS;
}
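
/* The policy change is carried out as an EMT rendezvous (see
   EMR3SetExecutionPolicy below) so that every EMT passes through a safe point:
   EMT(0) updates the shared scheduling variables, while each EMT independently
   reports whether it must leave its current inner loop and reschedule. */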


/**
 * Changes an execution scheduling policy parameter.
 *
 * This is used to enable or disable raw-mode / hardware-virtualization
 * execution of user and supervisor code.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to change.
 * @param   fEnforce    Whether to enforce the policy or not.
 */
VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);

    struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
    return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
}
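
/* Illustrative use only: forcing all guest code through IEM from a ring-3
 * caller holding a valid user mode VM handle (hypothetical calling context):
 *
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *     AssertLogRelRC(rc);
 *
 * A VINF_EM_RESCHEDULE result merely means some EMT will re-evaluate its
 * execution mode; it is not an error. */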


/**
 * Queries an execution scheduling policy parameter.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to query.
 * @param   pfEnforced  Where to return the current value.
 */
VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
{
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /* No need to bother EMTs with a query. */
    switch (enmPolicy)
    {
        case EMEXECPOLICY_RECOMPILE_RING0:
            *pfEnforced = pVM->fRecompileSupervisor;
            break;
        case EMEXECPOLICY_RECOMPILE_RING3:
            *pfEnforced = pVM->fRecompileUser;
            break;
        case EMEXECPOLICY_IEM_ALL:
            *pfEnforced = pVM->em.s.fIemExecutesAll;
            break;
        default:
            AssertFailedReturn(VERR_INTERNAL_ERROR_2);
    }

    return VINF_SUCCESS;
}


/**
 * Raise a fatal error.
 *
 * Safely terminate the VM with full state report and stuff. This function
 * will naturally never return.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
    AssertReleaseMsgFailed(("longjmp returned!\n"));
}
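
/* Note: the long jump above is expected to unwind back to the matching setjmp
   on pVCpu->em.s.u.FatalLongJump established around the main loop in
   EMR3ExecuteVM() (further down in this file), which then performs the guru
   meditation handling. */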


#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
/**
 * Gets the EM state name.
 *
 * @returns Pointer to a read-only state name.
 * @param   enmState    The state.
 */
static const char *emR3GetStateName(EMSTATE enmState)
{
    switch (enmState)
    {
        case EMSTATE_NONE:              return "EMSTATE_NONE";
        case EMSTATE_RAW:               return "EMSTATE_RAW";
        case EMSTATE_HM:                return "EMSTATE_HM";
        case EMSTATE_IEM:               return "EMSTATE_IEM";
        case EMSTATE_REM:               return "EMSTATE_REM";
        case EMSTATE_HALTED:            return "EMSTATE_HALTED";
        case EMSTATE_WAIT_SIPI:         return "EMSTATE_WAIT_SIPI";
        case EMSTATE_SUSPENDED:         return "EMSTATE_SUSPENDED";
        case EMSTATE_TERMINATING:       return "EMSTATE_TERMINATING";
        case EMSTATE_DEBUG_GUEST_RAW:   return "EMSTATE_DEBUG_GUEST_RAW";
        case EMSTATE_DEBUG_GUEST_HM:    return "EMSTATE_DEBUG_GUEST_HM";
        case EMSTATE_DEBUG_GUEST_IEM:   return "EMSTATE_DEBUG_GUEST_IEM";
        case EMSTATE_DEBUG_GUEST_REM:   return "EMSTATE_DEBUG_GUEST_REM";
        case EMSTATE_DEBUG_HYPER:       return "EMSTATE_DEBUG_HYPER";
        case EMSTATE_GURU_MEDITATION:   return "EMSTATE_GURU_MEDITATION";
        case EMSTATE_IEM_THEN_REM:      return "EMSTATE_IEM_THEN_REM";
        default:                        return "Unknown!";
    }
}
#endif /* LOG_ENABLED || VBOX_STRICT */


/**
 * Debug loop.
 *
 * @returns VBox status code for EM.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      Current EM VBox status code.
 */
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
        const VBOXSTRICTRC rcLast = rc;

        /*
         * Debug related RC.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (   pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    || pVCpu->em.s.fForceRAW /* paranoia */)
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawStep(pVM, pVCpu);
#else
                    AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
#endif
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
                    rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
#ifdef VBOX_WITH_REM
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
                    rc = emR3RemStep(pVM, pVCpu);
#endif
                else
                {
                    rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
                    if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
                        rc = VINF_EM_DBG_STEPPED;
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_EVENT:
                rc = DBGFR3EventHandlePending(pVM, pVCpu);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                RTLogFlush(NULL);
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;
            case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special error codes! */
            {
                LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
            }
        }

        /*
         * Process the result.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Continue the debugging loop.
             */
            case VINF_EM_DBG_STEP:
            case VINF_EM_DBG_STOP:
            case VINF_EM_DBG_EVENT:
            case VINF_EM_DBG_STEPPED:
            case VINF_EM_DBG_BREAKPOINT:
            case VINF_EM_DBG_HYPER_STEPPED:
            case VINF_EM_DBG_HYPER_BREAKPOINT:
            case VINF_EM_DBG_HYPER_ASSERTION:
                break;

            /*
             * Resuming execution (in some form) has to be done here if we got
             * a hypervisor debug event.
             */
            case VINF_SUCCESS:
            case VINF_EM_RESUME:
            case VINF_EM_SUSPEND:
            case VINF_EM_RESCHEDULE:
            case VINF_EM_RESCHEDULE_RAW:
            case VINF_EM_RESCHEDULE_REM:
            case VINF_EM_HALT:
                if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                {
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawResumeHyper(pVM, pVCpu);
                    if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                        continue;
#else
                    AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
#endif
                }
                if (rc == VINF_SUCCESS)
                    rc = VINF_EM_RESCHEDULE;
                return rc;

            /*
             * The debugger isn't attached.
             * We'll simply turn the thing off since that's the easiest thing to do.
             */
            case VERR_DBGF_NOT_ATTACHED:
                switch (VBOXSTRICTRC_VAL(rcLast))
                {
                    case VINF_EM_DBG_HYPER_STEPPED:
                    case VINF_EM_DBG_HYPER_BREAKPOINT:
                    case VINF_EM_DBG_HYPER_ASSERTION:
                    case VERR_TRPM_PANIC:
                    case VERR_TRPM_DONT_PANIC:
                    case VERR_VMM_RING0_ASSERTION:
                    case VERR_VMM_HYPER_CR3_MISMATCH:
                    case VERR_VMM_RING3_CALL_DISABLED:
                        return rcLast;
                }
                return VINF_EM_OFF;

            /*
             * Status codes terminating the VM in one or another sense.
             */
            case VINF_EM_TERMINATE:
            case VINF_EM_OFF:
            case VINF_EM_RESET:
            case VINF_EM_NO_MEMORY:
            case VINF_EM_RAW_STALE_SELECTOR:
            case VINF_EM_RAW_IRET_TRAP:
            case VERR_TRPM_PANIC:
            case VERR_TRPM_DONT_PANIC:
            case VERR_IEM_INSTR_NOT_IMPLEMENTED:
            case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
            case VERR_VMM_RING0_ASSERTION:
            case VERR_VMM_HYPER_CR3_MISMATCH:
            case VERR_VMM_RING3_CALL_DISABLED:
            case VERR_INTERNAL_ERROR:
            case VERR_INTERNAL_ERROR_2:
            case VERR_INTERNAL_ERROR_3:
            case VERR_INTERNAL_ERROR_4:
            case VERR_INTERNAL_ERROR_5:
            case VERR_IPE_UNEXPECTED_STATUS:
            case VERR_IPE_UNEXPECTED_INFO_STATUS:
            case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                return rc;

            /*
             * The rest is unexpected, and will keep us here.
             */
            default:
                AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
                break;
        }
    } /* debug forever */
}


/**
 * Steps recompiled code.
 *
 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
 *          VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
    Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

#ifdef VBOX_WITH_REM
    EMRemLock(pVM);

    /*
     * Switch to REM, step instruction, switch back.
     */
    int rc = REMR3State(pVM, pVCpu);
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);
        REMR3StateBack(pVM, pVCpu);
    }
    EMRemUnlock(pVM);

#else
    int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
#endif

    Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
    return rc;
}


/**
 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
 * critical section.
 *
 * @returns false - new fInREMState value.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
{
#ifdef VBOX_WITH_REM
    STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
    REMR3StateBack(pVM, pVCpu);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);

    EMRemUnlock(pVM);
#endif
    return false;
}
1048
1049
1050/**
1051 * Executes recompiled code.
1052 *
1053 * This function contains the recompiler version of the inner
1054 * execution loop (the outer loop being in EMR3ExecuteVM()).
1055 *
1056 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1057 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1058 *
1059 * @param pVM The cross context VM structure.
1060 * @param pVCpu The cross context virtual CPU structure.
1061 * @param pfFFDone Where to store an indicator telling whether or not
1062 * FFs were done before returning.
1063 *
1064 */
1065static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1066{
1067#ifdef LOG_ENABLED
1068 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1069 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1070
1071 if (pCtx->eflags.Bits.u1VM)
1072 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
1073 else
1074 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
1075#endif
1076 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1077
1078#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1079 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1080 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1081 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1082#endif
1083
1084 /*
1085 * Spin until we get a forced action which returns anything but VINF_SUCCESS,
1086 * or until the REM suggests raw-mode execution.
1087 */
1088 *pfFFDone = false;
1089#ifdef VBOX_WITH_REM
1090 bool fInREMState = false;
1091#else
1092 uint32_t cLoops = 0;
1093#endif
1094 int rc = VINF_SUCCESS;
1095 for (;;)
1096 {
1097#ifdef VBOX_WITH_REM
1098 /*
1099 * Lock REM and update the state if not already in sync.
1100 *
1101 * Note! Big lock, but you are not supposed to own any lock when
1102 * coming in here.
1103 */
1104 if (!fInREMState)
1105 {
1106 EMRemLock(pVM);
1107 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1108
1109 /* Flush the recompiler translation blocks if the VCPU has changed;
1110 also force a full CPU state resync. */
1111 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1112 {
1113 REMFlushTBs(pVM);
1114 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1115 }
1116 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1117
1118 rc = REMR3State(pVM, pVCpu);
1119
1120 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1121 if (RT_FAILURE(rc))
1122 break;
1123 fInREMState = true;
1124
1125 /*
1126 * We might have missed the raising of VMREQ, TIMER and some other
1127 * important FFs while we were busy switching the state. So, check again.
1128 */
1129 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1130 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1131 {
1132 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1133 goto l_REMDoForcedActions;
1134 }
1135 }
1136#endif
1137
1138 /*
1139 * Execute REM.
1140 */
1141 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1142 {
1143 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1144#ifdef VBOX_WITH_REM
1145 rc = REMR3Run(pVM, pVCpu);
1146#else
1147 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
1148#endif
1149 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1150 }
1151 else
1152 {
1153 /* Give up this time slice; virtual time continues */
1154 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1155 RTThreadSleep(5);
1156 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1157 rc = VINF_SUCCESS;
1158 }
1159
1160 /*
1161 * Deal with high priority post execution FFs before doing anything
1162 * else. Sync back the state and leave the lock to be on the safe side.
1163 */
1164 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1165 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1166 {
1167#ifdef VBOX_WITH_REM
1168 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1169#endif
1170 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1171 }
1172
1173 /*
1174 * Process the returned status code.
1175 */
1176 if (rc != VINF_SUCCESS)
1177 {
1178 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1179 break;
1180 if (rc != VINF_REM_INTERRUPED_FF)
1181 {
1182#ifndef VBOX_WITH_REM
1183 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1184 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1185 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1186 {
1187 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1188 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1189 {
1190 rc = VINF_EM_RESCHEDULE;
1191 break;
1192 }
1193 }
1194#endif
1195
1196 /*
1197 * Anything which is not known to us means an internal error
1198 * and the termination of the VM!
1199 */
1200 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1201 break;
1202 }
1203 }
1204
1205
1206 /*
1207 * Check and execute forced actions.
1208 *
1209 * Sync back the VM state and leave the lock before calling any of
1210 * these; you never know what's going to happen here.
1211 */
1212#ifdef VBOX_HIGH_RES_TIMERS_HACK
1213 TMTimerPollVoid(pVM, pVCpu);
1214#endif
1215 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1216 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1217 || VMCPU_FF_IS_PENDING(pVCpu,
1218 VMCPU_FF_ALL_REM_MASK
1219 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1220 {
1221#ifdef VBOX_WITH_REM
1222l_REMDoForcedActions:
1223 if (fInREMState)
1224 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1225#endif
1226 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1227 rc = emR3ForcedActions(pVM, pVCpu, rc);
1228 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1229 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1230 if ( rc != VINF_SUCCESS
1231 && rc != VINF_EM_RESCHEDULE_REM)
1232 {
1233 *pfFFDone = true;
1234 break;
1235 }
1236 }
1237
1238#ifndef VBOX_WITH_REM
1239 /*
1240 * Have to check if we can get back to fast execution mode every so often.
1241 */
1242 if (!(++cLoops & 7))
1243 {
1244 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1245 if ( enmCheck != EMSTATE_REM
1246 && enmCheck != EMSTATE_IEM_THEN_REM)
1247 return VINF_EM_RESCHEDULE;
1248 }
1249#endif
1250
1251 } /* The Inner Loop, recompiled execution mode version. */
1252
1253
1254#ifdef VBOX_WITH_REM
1255 /*
1256 * Returning. Sync back the VM state if required.
1257 */
1258 if (fInREMState)
1259 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1260#endif
1261
1262 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1263 return rc;
1264}
1265
1266
1267#ifdef DEBUG
1268
1269int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1270{
1271 EMSTATE enmOldState = pVCpu->em.s.enmState;
1272
1273 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1274
1275 Log(("Single step BEGIN:\n"));
1276 for (uint32_t i = 0; i < cIterations; i++)
1277 {
1278 DBGFR3PrgStep(pVCpu);
1279 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1280 emR3RemStep(pVM, pVCpu);
1281 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1282 break;
1283 }
1284 Log(("Single step END:\n"));
1285 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1286 pVCpu->em.s.enmState = enmOldState;
1287 return VINF_EM_RESCHEDULE;
1288}
1289
1290#endif /* DEBUG */
1291
1292
1293/**
1294 * Try to execute the problematic code in IEM first, then fall back on REM if there
1295 * is too much of it or if IEM doesn't implement something.
1296 *
1297 * @returns Strict VBox status code from IEMExecLots.
1298 * @param pVM The cross context VM structure.
1299 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1300 * @param pfFFDone Force flags done indicator.
1301 *
1302 * @thread EMT(pVCpu)
1303 */
1304static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1305{
1306 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1307 *pfFFDone = false;
1308
1309 /*
1310 * Execute in IEM for a while.
1311 */
1312 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1313 {
1314 uint32_t cInstructions;
1315 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
1316 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1317 if (rcStrict != VINF_SUCCESS)
1318 {
1319 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1320 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1321 break;
1322
1323 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1324 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1325 return rcStrict;
1326 }
1327
1328 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1329 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1330 {
1331 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1332 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1333 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1334 pVCpu->em.s.enmState = enmNewState;
1335 return VINF_SUCCESS;
1336 }
1337
1338 /*
1339 * Check for pending actions.
1340 */
1341 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1342 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK))
1343 return VINF_SUCCESS;
1344 }
1345
1346 /*
1347 * Switch to REM.
1348 */
1349 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1350 pVCpu->em.s.enmState = EMSTATE_REM;
1351 return VINF_SUCCESS;
1352}
1353
1354
1355/**
1356 * Decides whether to execute RAW, HM or REM.
1357 *
1358 * @returns new EM state
1359 * @param pVM The cross context VM structure.
1360 * @param pVCpu The cross context virtual CPU structure.
1361 * @param pCtx Pointer to the guest CPU context.
1362 */
1363EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1364{
1365 /*
1366 * When forcing raw-mode execution, things are simple.
1367 */
1368 if (pVCpu->em.s.fForceRAW)
1369 return EMSTATE_RAW;
1370
1371 /*
1372 * We stay in the wait for SIPI state unless explicitly told otherwise.
1373 */
1374 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1375 return EMSTATE_WAIT_SIPI;
1376
1377 /*
1378 * Execute everything in IEM?
1379 */
1380 if (pVM->em.s.fIemExecutesAll)
1381 return EMSTATE_IEM;
1382
1383 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1384 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1385 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1386
1387 X86EFLAGS EFlags = pCtx->eflags;
1388 if (HMIsEnabled(pVM))
1389 {
1390 /*
1391 * Hardware accelerated raw-mode:
1392 */
1393 if ( EMIsHwVirtExecutionEnabled(pVM)
1394 && HMR3CanExecuteGuest(pVM, pCtx))
1395 return EMSTATE_HM;
1396
1397 /*
1398 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1399 * turns off monitoring features essential for raw mode!
1400 */
1401 return EMSTATE_IEM_THEN_REM;
1402 }
1403
1404 /*
1405 * Standard raw-mode:
1406 *
1407 * Here we only support 16 & 32-bit protected mode ring-3 code that has no I/O privileges,
1408 * or 32-bit protected mode ring-0 code.
1409 *
1410 * The tests are ordered by the likelihood of being true during normal execution.
1411 */
1412 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1413 {
1414 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1415 return EMSTATE_REM;
1416 }
1417
1418# ifndef VBOX_RAW_V86
1419 if (EFlags.u32 & X86_EFL_VM) {
1420 Log2(("raw mode refused: VM_MASK\n"));
1421 return EMSTATE_REM;
1422 }
1423# endif
1424
1425 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1426 uint32_t u32CR0 = pCtx->cr0;
1427 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1428 {
1429 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1430 return EMSTATE_REM;
1431 }
1432
1433 if (pCtx->cr4 & X86_CR4_PAE)
1434 {
1435 uint32_t u32Dummy, u32Features;
1436
1437 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1438 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1439 return EMSTATE_REM;
1440 }
1441
1442 unsigned uSS = pCtx->ss.Sel;
1443 if ( pCtx->eflags.Bits.u1VM
1444 || (uSS & X86_SEL_RPL) == 3)
1445 {
1446 if (!EMIsRawRing3Enabled(pVM))
1447 return EMSTATE_REM;
1448
1449 if (!(EFlags.u32 & X86_EFL_IF))
1450 {
1451 Log2(("raw mode refused: IF (RawR3)\n"));
1452 return EMSTATE_REM;
1453 }
1454
1455 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1456 {
1457 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1458 return EMSTATE_REM;
1459 }
1460 }
1461 else
1462 {
1463 if (!EMIsRawRing0Enabled(pVM))
1464 return EMSTATE_REM;
1465
1466 if (EMIsRawRing1Enabled(pVM))
1467 {
1468 /* Only ring 0 and 1 supervisor code. */
1469 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1470 {
1471 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1472 return EMSTATE_REM;
1473 }
1474 }
1475 /* Only ring 0 supervisor code. */
1476 else if ((uSS & X86_SEL_RPL) != 0)
1477 {
1478 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1479 return EMSTATE_REM;
1480 }
1481
1482 // Let's start with pure 32 bits ring 0 code first
1483 /** @todo What's pure 32-bit mode? flat? */
1484 if ( !(pCtx->ss.Attr.n.u1DefBig)
1485 || !(pCtx->cs.Attr.n.u1DefBig))
1486 {
1487 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1488 return EMSTATE_REM;
1489 }
1490
1491 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1492 if (!(u32CR0 & X86_CR0_WP))
1493 {
1494 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1495 return EMSTATE_REM;
1496 }
1497
1498# ifdef VBOX_WITH_RAW_MODE
1499 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1500 {
1501 Log2(("raw r0 mode forced: patch code\n"));
1502# ifdef VBOX_WITH_SAFE_STR
1503 Assert(pCtx->tr.Sel);
1504# endif
1505 return EMSTATE_RAW;
1506 }
1507# endif /* VBOX_WITH_RAW_MODE */
1508
1509# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1510 if (!(EFlags.u32 & X86_EFL_IF))
1511 {
1512 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1513 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1514 return EMSTATE_REM;
1515 }
1516# endif
1517
1518# ifndef VBOX_WITH_RAW_RING1
1519 /** @todo still necessary??? */
1520 if (EFlags.Bits.u2IOPL != 0)
1521 {
1522 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1523 return EMSTATE_REM;
1524 }
1525# endif
1526 }
1527
1528 /*
1529 * Stale hidden selectors mean raw-mode is unsafe (we are being very careful here).
1530 */
1531 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1532 {
1533 Log2(("raw mode refused: stale CS\n"));
1534 return EMSTATE_REM;
1535 }
1536 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1537 {
1538 Log2(("raw mode refused: stale SS\n"));
1539 return EMSTATE_REM;
1540 }
1541 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1542 {
1543 Log2(("raw mode refused: stale DS\n"));
1544 return EMSTATE_REM;
1545 }
1546 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1547 {
1548 Log2(("raw mode refused: stale ES\n"));
1549 return EMSTATE_REM;
1550 }
1551 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1552 {
1553 Log2(("raw mode refused: stale FS\n"));
1554 return EMSTATE_REM;
1555 }
1556 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1557 {
1558 Log2(("raw mode refused: stale GS\n"));
1559 return EMSTATE_REM;
1560 }
1561
1562# ifdef VBOX_WITH_SAFE_STR
1563 if (pCtx->tr.Sel == 0)
1564 {
1565 Log(("Raw mode refused -> TR=0\n"));
1566 return EMSTATE_REM;
1567 }
1568# endif
1569
1570 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1571 return EMSTATE_RAW;
1572}
1573
1574
1575/**
1576 * Executes all high priority post execution force actions.
1577 *
1578 * @returns rc or a fatal status code.
1579 *
1580 * @param pVM The cross context VM structure.
1581 * @param pVCpu The cross context virtual CPU structure.
1582 * @param rc The current rc.
1583 */
1584int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1585{
1586 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1587
1588 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1589 PDMCritSectBothFF(pVCpu);
1590
1591 /* Update CR3 (Nested Paging case for HM). */
1592 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1593 {
1594 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1595 if (RT_FAILURE(rc2))
1596 return rc2;
1597 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1598 }
1599
1600 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1601 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1602 {
1603 if (CPUMIsGuestInPAEMode(pVCpu))
1604 {
1605 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1606 AssertPtr(pPdpes);
1607
1608 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1609 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1610 }
1611 else
1612 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1613 }
1614
1615 /* IEM has pending work (typically memory write after INS instruction). */
1616 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
1617 rc = VBOXSTRICTRC_TODO(IEMR3ProcessForceFlag(pVM, pVCpu, rc));
1618
1619 /* IOM has pending work (committing an I/O or MMIO write). */
1620 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
1621 rc = VBOXSTRICTRC_TODO(IOMR3ProcessForceFlag(pVM, pVCpu, rc));
1622
1623#ifdef VBOX_WITH_RAW_MODE
1624 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1625 CSAMR3DoPendingAction(pVM, pVCpu);
1626#endif
1627
1628 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1629 {
1630 if ( rc > VINF_EM_NO_MEMORY
1631 && rc <= VINF_EM_LAST)
1632 rc = VINF_EM_NO_MEMORY;
1633 }
1634
1635 return rc;
1636}
1637
1638
1639/**
1640 * Executes all pending forced actions.
1641 *
1642 * Forced actions can cause execution delays and execution
1643 * rescheduling. The first we deal with using action priority, so
1644 * that for instance pending timers aren't scheduled and run until
1645 * right before execution. The rescheduling we deal with using
1646 * return codes. The same goes for VM termination, only in that case
1647 * we exit everything.
1648 *
1649 * @returns VBox status code of equal or greater importance/severity than rc.
1650 * The most important ones are: VINF_EM_RESCHEDULE,
1651 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1652 *
1653 * @param pVM The cross context VM structure.
1654 * @param pVCpu The cross context virtual CPU structure.
1655 * @param rc The current rc.
1656 *
1657 */
1658int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1659{
1660 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1661#ifdef VBOX_STRICT
1662 int rcIrq = VINF_SUCCESS;
1663#endif
1664 int rc2;
1665#define UPDATE_RC() \
1666 do { \
1667 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1668 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1669 break; \
1670 if (!rc || rc2 < rc) \
1671 rc = rc2; \
1672 } while (0)
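    /* How UPDATE_RC() merges codes: VINF_SUCCESS (0) never overrides anything
       and an rc that is already an error status is never overridden; otherwise
       the numerically smaller status wins, so among VINF_EM_* codes the more
       urgent one takes precedence, and an error in rc2 overrides any
       informational rc. E.g. rc = VINF_SUCCESS with rc2 = VINF_EM_SUSPEND
       yields rc = VINF_EM_SUSPEND. */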
1673 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1674
1675 /*
1676 * Post execution chunk first.
1677 */
1678 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1679 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1680 {
1681 /*
1682 * EMT Rendezvous (must be serviced before termination).
1683 */
1684 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1685 {
1686 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1687 UPDATE_RC();
1688 /** @todo HACK ALERT! The following test is to make sure EM+TM
1689 * thinks the VM is stopped/reset before the next VM state change
1690 * is made. We need a better solution for this, or at least make it
1691 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1692 * VINF_EM_SUSPEND). */
1693 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1694 {
1695 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1696 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1697 return rc;
1698 }
1699 }
1700
1701 /*
1702 * State change request (cleared by vmR3SetStateLocked).
1703 */
1704 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1705 {
1706 VMSTATE enmState = VMR3GetState(pVM);
1707 switch (enmState)
1708 {
1709 case VMSTATE_FATAL_ERROR:
1710 case VMSTATE_FATAL_ERROR_LS:
1711 case VMSTATE_GURU_MEDITATION:
1712 case VMSTATE_GURU_MEDITATION_LS:
1713 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1714 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1715 return VINF_EM_SUSPEND;
1716
1717 case VMSTATE_DESTROYING:
1718 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1719 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1720 return VINF_EM_TERMINATE;
1721
1722 default:
1723 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1724 }
1725 }
1726
1727 /*
1728 * Debugger Facility polling.
1729 */
1730 if ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
1731 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
1732 {
1733 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1734 UPDATE_RC();
1735 }
1736
1737 /*
1738 * Postponed reset request.
1739 */
1740 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1741 {
1742 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1743 UPDATE_RC();
1744 }
1745
1746#ifdef VBOX_WITH_RAW_MODE
1747 /*
1748 * CSAM page scanning.
1749 */
1750 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1751 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1752 {
1753 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1754
1755 /** @todo: check for 16 or 32 bits code! (D bit in the code selector) */
1756 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1757
1758 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
1759 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1760 }
1761#endif
1762
1763 /*
1764 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1765 */
1766 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1767 {
1768 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1769 UPDATE_RC();
1770 if (rc == VINF_EM_NO_MEMORY)
1771 return rc;
1772 }
1773
1774 /* check that we got them all */
1775 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1776 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
1777 }
1778
1779 /*
1780 * Normal priority then.
1781 * (Executed in no particular order.)
1782 */
1783 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1784 {
1785 /*
1786 * PDM Queues are pending.
1787 */
1788 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1789 PDMR3QueueFlushAll(pVM);
1790
1791 /*
1792 * PDM DMA transfers are pending.
1793 */
1794 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1795 PDMR3DmaRun(pVM);
1796
1797 /*
1798 * EMT Rendezvous (make sure they are handled before the requests).
1799 */
1800 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1801 {
1802 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1803 UPDATE_RC();
1804 /** @todo HACK ALERT! The following test is to make sure EM+TM
1805 * thinks the VM is stopped/reset before the next VM state change
1806 * is made. We need a better solution for this, or at least make it
1807 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1808 * VINF_EM_SUSPEND). */
1809 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1810 {
1811 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1812 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1813 return rc;
1814 }
1815 }
1816
1817 /*
1818 * Requests from other threads.
1819 */
1820 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1821 {
1822 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1823 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1824 {
1825 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1826 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1827 return rc2;
1828 }
1829 UPDATE_RC();
1830 /** @todo HACK ALERT! The following test is to make sure EM+TM
1831 * thinks the VM is stopped/reset before the next VM state change
1832 * is made. We need a better solution for this, or at least make it
1833 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1834 * VINF_EM_SUSPEND). */
1835 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1836 {
1837 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1838 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1839 return rc;
1840 }
1841 }
1842
1843#ifdef VBOX_WITH_REM
1844 /* Replay the handler notification changes. */
1845 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1846 {
1847 /* Try not to cause deadlocks. */
1848 if ( pVM->cCpus == 1
1849 || ( !PGMIsLockOwner(pVM)
1850 && !IOMIsLockWriteOwner(pVM))
1851 )
1852 {
1853 EMRemLock(pVM);
1854 REMR3ReplayHandlerNotifications(pVM);
1855 EMRemUnlock(pVM);
1856 }
1857 }
1858#endif
1859
1860 /* check that we got them all */
1861 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1862 }
1863
1864 /*
1865 * Normal priority then. (per-VCPU)
1866 * (Executed in no particular order.)
1867 */
1868 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1869 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1870 {
1871 /*
1872 * Requests from other threads.
1873 */
1874 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
1875 {
1876 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1877 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1878 {
1879 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1880 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1881 return rc2;
1882 }
1883 UPDATE_RC();
1884 /** @todo HACK ALERT! The following test is to make sure EM+TM
1885 * thinks the VM is stopped/reset before the next VM state change
1886 * is made. We need a better solution for this, or at least make it
1887 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1888 * VINF_EM_SUSPEND). */
1889 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1890 {
1891 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1892 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1893 return rc;
1894 }
1895 }
1896
1897 /*
1898 * Forced unhalting of EMT.
1899 */
1900 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
1901 {
1902 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
1903 if (rc == VINF_EM_HALT)
1904 rc = VINF_EM_RESCHEDULE;
1905 else
1906 {
1907 rc2 = VINF_EM_RESCHEDULE;
1908 UPDATE_RC();
1909 }
1910 }
1911
1912 /* check that we got them all */
1913 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST | VMCPU_FF_UNHALT)));
1914 }
1915
1916 /*
1917 * High priority pre execution chunk last.
1918 * (Executed in ascending priority order.)
1919 */
1920 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1921 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1922 {
1923 /*
1924 * Timers before interrupts.
1925 */
1926 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
1927 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1928 TMR3TimerQueuesDo(pVM);
1929
1930#ifdef VBOX_WITH_NEW_APIC
1931 /*
1932 * Pick up asynchronously posted interrupts into the APIC.
1933 */
1934 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1935 APICUpdatePendingInterrupts(pVCpu);
1936#endif
1937
1938 /*
1939 * The instruction following an emulated STI should *always* be executed!
1940 *
1941 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
1942 * the eip is the same as the inhibited instr address. Before we
1943 * are able to execute this instruction in raw mode (iret to
1944 * guest code) an external interrupt might force a world switch
1945 * again. Possibly allowing a guest interrupt to be dispatched
1946 * in the process. This could break the guest. Sounds very
1947 * unlikely, but such timing-sensitive problems are not as rare as
1948 * you might think.
1949 */
1950 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1951 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1952 {
1953 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1954 {
1955 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1956 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1957 }
1958 else
1959 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1960 }
1961
1962 /*
1963 * Interrupts.
1964 */
1965 bool fWakeupPending = false;
1966 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1967 && !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1968 && (!rc || rc >= VINF_EM_RESCHEDULE_HM)
1969 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
1970#ifdef VBOX_WITH_RAW_MODE
1971 && PATMAreInterruptsEnabled(pVM)
1972#else
1973 && (pVCpu->em.s.pCtx->eflags.u32 & X86_EFL_IF)
1974#endif
1975 && !HMR3IsEventPending(pVCpu))
1976 {
1977 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1978 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1979 {
1980 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1981 /** @todo this really isn't nice, should properly handle this */
1982 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1983 if (pVM->em.s.fIemExecutesAll && (rc2 == VINF_EM_RESCHEDULE_REM || rc2 == VINF_EM_RESCHEDULE_HM || rc2 == VINF_EM_RESCHEDULE_RAW))
1984 rc2 = VINF_EM_RESCHEDULE;
1985#ifdef VBOX_STRICT
1986 rcIrq = rc2;
1987#endif
1988 UPDATE_RC();
1989 /* Reschedule required: We must not miss the wakeup below! */
1990 fWakeupPending = true;
1991 }
1992 }
1993
1994 /*
1995 * Allocate handy pages.
1996 */
1997 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1998 {
1999 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2000 UPDATE_RC();
2001 }
2002
2003 /*
2004 * Debugger Facility request.
2005 */
2006 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2007 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2008 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
2009 {
2010 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2011 UPDATE_RC();
2012 }
2013
2014 /*
2015 * EMT Rendezvous (must be serviced before termination).
2016 */
2017 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2018 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2019 {
2020 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2021 UPDATE_RC();
2022 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2023 * stopped/reset before the next VM state change is made. We need a better
2024 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2025 * && rc <= VINF_EM_SUSPEND). */
2026 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2027 {
2028 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2029 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2030 return rc;
2031 }
2032 }
2033
2034 /*
2035 * State change request (cleared by vmR3SetStateLocked).
2036 */
2037 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2038 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2039 {
2040 VMSTATE enmState = VMR3GetState(pVM);
2041 switch (enmState)
2042 {
2043 case VMSTATE_FATAL_ERROR:
2044 case VMSTATE_FATAL_ERROR_LS:
2045 case VMSTATE_GURU_MEDITATION:
2046 case VMSTATE_GURU_MEDITATION_LS:
2047 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2048 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2049 return VINF_EM_SUSPEND;
2050
2051 case VMSTATE_DESTROYING:
2052 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2053 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2054 return VINF_EM_TERMINATE;
2055
2056 default:
2057 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2058 }
2059 }
2060
2061 /*
2062 * Out of memory? Since most of our fellow high priority actions may cause us
2063 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2064 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2065 * than us since we can terminate without allocating more memory.
2066 */
2067 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2068 {
2069 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2070 UPDATE_RC();
2071 if (rc == VINF_EM_NO_MEMORY)
2072 return rc;
2073 }
2074
2075 /*
2076 * If the virtual sync clock is still stopped, make TM restart it.
2077 */
2078 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2079 TMR3VirtualSyncFF(pVM, pVCpu);
2080
2081#ifdef DEBUG
2082 /*
2083 * Debug, pause the VM.
2084 */
2085 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2086 {
2087 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2088 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2089 return VINF_EM_SUSPEND;
2090 }
2091#endif
2092
2093 /* check that we got them all */
2094 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2095 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2096 }
2097
2098#undef UPDATE_RC
2099 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2100 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2101 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2102 return rc;
2103}
2104
2105
2106/**
2107 * Check whether the configured CPU execution time cap still allows guest execution in the current time slice.
2108 *
2109 * @returns true if allowed, false otherwise
2110 * @param pVM The cross context VM structure.
2111 * @param pVCpu The cross context virtual CPU structure.
2112 */
2113bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2114{
2115 uint64_t u64UserTime, u64KernelTime;
2116
2117 if ( pVM->uCpuExecutionCap != 100
2118 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2119 {
2120 uint64_t u64TimeNow = RTTimeMilliTS();
2121 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2122 {
2123 /* New time slice. */
2124 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2125 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2126 pVCpu->em.s.u64TimeSliceExec = 0;
2127 }
2128 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2129
2130 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2131 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2132 return false;
2133 }
2134 return true;
2135}
2136
2137
2138/**
2139 * Execute VM.
2140 *
2141 * This function is the main loop of the VM. The emulation thread
2142 * calls this function when the VM has been successfully constructed
2143 * and we're ready to execute the VM.
2144 *
2145 * Returning from this function means that the VM is turned off or
2146 * suspended (state already saved) and deconstruction is next in line.
2147 *
2148 * All interaction from other threads is done using forced actions
2149 * and signaling of the wait object.
2150 *
2151 * @returns VBox status code, informational status codes may indicate failure.
2152 * @param pVM The cross context VM structure.
2153 * @param pVCpu The cross context virtual CPU structure.
2154 */
2155VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2156{
2157 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2158 pVM,
2159 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2160 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2161 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2162 pVCpu->em.s.fForceRAW));
2163 VM_ASSERT_EMT(pVM);
2164 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2165 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2166 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2167 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2168
2169 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
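    /* setjmp returns 0 on this direct call, making the big if-block below the
       normal path; a fatal error handler may longjmp back here with a
       non-zero status code, which lands in the else-branch at the bottom of
       this function and triggers the guru meditation handling. */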
2170 if (rc == 0)
2171 {
2172 /*
2173 * Start the virtual time.
2174 */
2175 TMR3NotifyResume(pVM, pVCpu);
2176
2177 /*
2178 * The Outer Main Loop.
2179 */
2180 bool fFFDone = false;
2181
2182 /* Reschedule right away to start in the right state. */
2183 rc = VINF_SUCCESS;
2184
2185 /* If resuming after a pause or a state load, restore the previous
2186 state or else we'll start executing code. Else, just reschedule. */
2187 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2188 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2189 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2190 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2191 else
2192 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2193 pVCpu->em.s.cIemThenRemInstructions = 0;
2194 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2195
2196 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2197 for (;;)
2198 {
2199 /*
2200 * Before we can schedule anything (we're here because
2201 * scheduling is required) we must service any pending
2202 * forced actions to avoid any pending action causing
2203 * immediate rescheduling upon entering an inner loop
2204 *
2205 * Do forced actions.
2206 */
2207 if ( !fFFDone
2208 && RT_SUCCESS(rc)
2209 && rc != VINF_EM_TERMINATE
2210 && rc != VINF_EM_OFF
2211 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2212 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK)))
2213 {
2214 rc = emR3ForcedActions(pVM, pVCpu, rc);
2215 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2216 if ( ( rc == VINF_EM_RESCHEDULE_REM
2217 || rc == VINF_EM_RESCHEDULE_HM)
2218 && pVCpu->em.s.fForceRAW)
2219 rc = VINF_EM_RESCHEDULE_RAW;
2220 }
2221 else if (fFFDone)
2222 fFFDone = false;
2223
2224 /*
2225 * Now what to do?
2226 */
2227 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2228 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2229 switch (rc)
2230 {
2231 /*
2232 * Keep doing what we're currently doing.
2233 */
2234 case VINF_SUCCESS:
2235 break;
2236
2237 /*
2238 * Reschedule - to raw-mode execution.
2239 */
2240 case VINF_EM_RESCHEDULE_RAW:
2241 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2242 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2243 pVCpu->em.s.enmState = EMSTATE_RAW;
2244 break;
2245
2246 /*
2247 * Reschedule - to hardware accelerated raw-mode execution.
2248 */
2249 case VINF_EM_RESCHEDULE_HM:
2250 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2251 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2252 Assert(!pVCpu->em.s.fForceRAW);
2253 pVCpu->em.s.enmState = EMSTATE_HM;
2254 break;
2255
2256 /*
2257 * Reschedule - to recompiled execution.
2258 */
2259 case VINF_EM_RESCHEDULE_REM:
2260 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2261 if (HMIsEnabled(pVM))
2262 {
2263 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2264 enmOldState, EMSTATE_IEM_THEN_REM));
2265 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2266 {
2267 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2268 pVCpu->em.s.cIemThenRemInstructions = 0;
2269 }
2270 }
2271 else
2272 {
2273 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2274 pVCpu->em.s.enmState = EMSTATE_REM;
2275 }
2276 break;
2277
2278 /*
2279 * Resume.
2280 */
2281 case VINF_EM_RESUME:
2282 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2283 /* Don't reschedule in the halted or wait for SIPI case. */
2284 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2285 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2286 {
2287 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2288 break;
2289 }
2290 /* fall through and get scheduled. */
2291
2292 /*
2293 * Reschedule.
2294 */
2295 case VINF_EM_RESCHEDULE:
2296 {
2297 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2298 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2299 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2300 pVCpu->em.s.cIemThenRemInstructions = 0;
2301 pVCpu->em.s.enmState = enmState;
2302 break;
2303 }
2304
2305 /*
2306 * Halted.
2307 */
2308 case VINF_EM_HALT:
2309 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2310 pVCpu->em.s.enmState = EMSTATE_HALTED;
2311 break;
2312
2313 /*
2314 * Switch to the wait for SIPI state (application processor only)
2315 */
2316 case VINF_EM_WAIT_SIPI:
2317 Assert(pVCpu->idCpu != 0);
2318 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2319 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2320 break;
2321
2322
2323 /*
2324 * Suspend.
2325 */
2326 case VINF_EM_SUSPEND:
2327 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2328 Assert(enmOldState != EMSTATE_SUSPENDED);
2329 pVCpu->em.s.enmPrevState = enmOldState;
2330 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2331 break;
2332
2333 /*
2334 * Reset.
2335 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2336 */
2337 case VINF_EM_RESET:
2338 {
2339 if (pVCpu->idCpu == 0)
2340 {
2341 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2342 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2343 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2344 pVCpu->em.s.cIemThenRemInstructions = 0;
2345 pVCpu->em.s.enmState = enmState;
2346 }
2347 else
2348 {
2349 /* All other VCPUs go into the wait for SIPI state. */
2350 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2351 }
2352 break;
2353 }
2354
2355 /*
2356 * Power Off.
2357 */
2358 case VINF_EM_OFF:
2359 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2360 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2361 TMR3NotifySuspend(pVM, pVCpu);
2362 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2363 return rc;
2364
2365 /*
2366 * Terminate the VM.
2367 */
2368 case VINF_EM_TERMINATE:
2369 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2370 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2371 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2372 TMR3NotifySuspend(pVM, pVCpu);
2373 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2374 return rc;
2375
2376
2377 /*
2378 * Out of memory, suspend the VM and stuff.
2379 */
2380 case VINF_EM_NO_MEMORY:
2381 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2382 Assert(enmOldState != EMSTATE_SUSPENDED);
2383 pVCpu->em.s.enmPrevState = enmOldState;
2384 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2385 TMR3NotifySuspend(pVM, pVCpu);
2386 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2387
2388 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2389 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2390 if (rc != VINF_EM_SUSPEND)
2391 {
2392 if (RT_SUCCESS_NP(rc))
2393 {
2394 AssertLogRelMsgFailed(("%Rrc\n", rc));
2395 rc = VERR_EM_INTERNAL_ERROR;
2396 }
2397 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2398 }
2399 return rc;
2400
2401 /*
2402 * Guest debug events.
2403 */
2404 case VINF_EM_DBG_STEPPED:
2405 case VINF_EM_DBG_STOP:
2406 case VINF_EM_DBG_EVENT:
2407 case VINF_EM_DBG_BREAKPOINT:
2408 case VINF_EM_DBG_STEP:
2409 if (enmOldState == EMSTATE_RAW)
2410 {
2411 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2412 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2413 }
2414 else if (enmOldState == EMSTATE_HM)
2415 {
2416 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2417 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2418 }
2419 else if (enmOldState == EMSTATE_REM)
2420 {
2421 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2422 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2423 }
2424 else
2425 {
2426 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2427 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2428 }
2429 break;
2430
2431 /*
2432 * Hypervisor debug events.
2433 */
2434 case VINF_EM_DBG_HYPER_STEPPED:
2435 case VINF_EM_DBG_HYPER_BREAKPOINT:
2436 case VINF_EM_DBG_HYPER_ASSERTION:
2437 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2438 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2439 break;
2440
2441 /*
2442 * Triple fault.
2443 */
2444 case VINF_EM_TRIPLE_FAULT:
2445 if (!pVM->em.s.fGuruOnTripleFault)
2446 {
2447 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2448 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2449 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2450 continue;
2451 }
2452 /* Else fall through and trigger a guru. */
2453 case VERR_VMM_RING0_ASSERTION:
2454 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2455 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2456 break;
2457
2458 /*
2459 * Any error code showing up here other than the ones we
2460 * know and process above are considered to be FATAL.
2461 *
2462 * Unknown warnings and informational status codes are also
2463 * included in this.
2464 */
2465 default:
2466 if (RT_SUCCESS_NP(rc))
2467 {
2468 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2469 rc = VERR_EM_INTERNAL_ERROR;
2470 }
2471 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2472 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2473 break;
2474 }
2475
2476 /*
2477 * Act on state transition.
2478 */
2479 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2480 if (enmOldState != enmNewState)
2481 {
2482 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2483
2484 /* Clear MWait flags. */
2485 if ( enmOldState == EMSTATE_HALTED
2486 && (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2487 && ( enmNewState == EMSTATE_RAW
2488 || enmNewState == EMSTATE_HM
2489 || enmNewState == EMSTATE_REM
2490 || enmNewState == EMSTATE_IEM_THEN_REM
2491 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2492 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2493 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2494 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2495 {
2496 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2497 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2498 }
2499 }
2500 else
2501 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2502
2503 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2504 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2505
2506 /*
2507 * Act on the new state.
2508 */
2509 switch (enmNewState)
2510 {
2511 /*
2512 * Execute raw.
2513 */
2514 case EMSTATE_RAW:
2515#ifdef VBOX_WITH_RAW_MODE
2516 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2517#else
2518 AssertLogRelMsgFailed(("%Rrc\n", rc));
2519 rc = VERR_EM_INTERNAL_ERROR;
2520#endif
2521 break;
2522
2523 /*
2524 * Execute hardware accelerated raw.
2525 */
2526 case EMSTATE_HM:
2527 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2528 break;
2529
2530 /*
2531 * Execute recompiled.
2532 */
2533 case EMSTATE_REM:
2534 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2535 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2536 break;
2537
2538 /*
2539 * Execute in the interpreter.
2540 */
2541 case EMSTATE_IEM:
2542 {
2543#if 0 /* For testing purposes. */
2544 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2545 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2546 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2547 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2548 rc = VINF_SUCCESS;
2549 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2550#endif
2551 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
2552 if (pVM->em.s.fIemExecutesAll)
2553 {
2554 Assert(rc != VINF_EM_RESCHEDULE_REM);
2555 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2556 Assert(rc != VINF_EM_RESCHEDULE_HM);
2557 }
2558 fFFDone = false;
2559 break;
2560 }
2561
2562 /*
2563 * Execute in IEM, hoping we can quickly switch back to HM
2564 * or RAW execution. If our hopes fail, we go to REM.
2565 */
2566 case EMSTATE_IEM_THEN_REM:
2567 {
2568 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2569 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2570 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2571 break;
2572 }
2573
2574 /*
2575 * Application processor execution halted until SIPI.
2576 */
2577 case EMSTATE_WAIT_SIPI:
2578 /* no break */
2579 /*
2580 * hlt - execution halted until interrupt.
2581 */
2582 case EMSTATE_HALTED:
2583 {
2584 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2585 /* If HM (or someone else) stores a pending interrupt in
2586 TRPM, it must be dispatched ASAP without any halting.
2587 Anything pending in TRPM has been accepted and the CPU
2588 should already be in the right state to receive it. */
2589 if (TRPMHasTrap(pVCpu))
2590 rc = VINF_EM_RESCHEDULE;
2591 /* MWAIT has a special extension where it's woken up when
2592 an interrupt is pending even when IF=0. */
2593 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2594 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2595 {
2596 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2597 if (rc == VINF_SUCCESS)
2598 {
2599#ifdef VBOX_WITH_NEW_APIC
2600 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2601 APICUpdatePendingInterrupts(pVCpu);
2602#endif
2603 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2604 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2605 {
2606 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2607 rc = VINF_EM_RESCHEDULE;
2608 }
2609 }
2610 }
2611 else
2612 {
2613 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2614 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2615 check VMCPU_FF_UPDATE_APIC here. */
2616 if ( rc == VINF_SUCCESS
2617 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2618 {
2619 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2620 rc = VINF_EM_RESCHEDULE;
2621 }
2622 }
2623
2624 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2625 break;
2626 }
2627
2628 /*
2629 * Suspended - return to VM.cpp.
2630 */
2631 case EMSTATE_SUSPENDED:
2632 TMR3NotifySuspend(pVM, pVCpu);
2633 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2634 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2635 return VINF_EM_SUSPEND;
2636
2637 /*
2638 * Debugging in the guest.
2639 */
2640 case EMSTATE_DEBUG_GUEST_RAW:
2641 case EMSTATE_DEBUG_GUEST_HM:
2642 case EMSTATE_DEBUG_GUEST_IEM:
2643 case EMSTATE_DEBUG_GUEST_REM:
2644 TMR3NotifySuspend(pVM, pVCpu);
2645 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2646 TMR3NotifyResume(pVM, pVCpu);
2647 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2648 break;
2649
2650 /*
2651 * Debugging in the hypervisor.
2652 */
2653 case EMSTATE_DEBUG_HYPER:
2654 {
2655 TMR3NotifySuspend(pVM, pVCpu);
2656 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2657
2658 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2659 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2660 if (rc != VINF_SUCCESS)
2661 {
2662 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2663 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2664 else
2665 {
2666 /* switch to guru meditation mode */
2667 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2668 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2669 VMMR3FatalDump(pVM, pVCpu, rc);
2670 }
2671 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2672 return rc;
2673 }
2674
2675 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2676 TMR3NotifyResume(pVM, pVCpu);
2677 break;
2678 }
2679
2680 /*
2681 * Guru meditation takes place in the debugger.
2682 */
2683 case EMSTATE_GURU_MEDITATION:
2684 {
2685 TMR3NotifySuspend(pVM, pVCpu);
2686 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2687 VMMR3FatalDump(pVM, pVCpu, rc);
2688 emR3Debug(pVM, pVCpu, rc);
2689 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2690 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2691 return rc;
2692 }
2693
2694 /*
2695 * The states we don't expect here.
2696 */
2697 case EMSTATE_NONE:
2698 case EMSTATE_TERMINATING:
2699 default:
2700 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2701 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2702 TMR3NotifySuspend(pVM, pVCpu);
2703 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2704 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2705 return VERR_EM_INTERNAL_ERROR;
2706 }
2707 } /* The Outer Main Loop */
2708 }
2709 else
2710 {
2711 /*
2712 * Fatal error.
2713 */
2714 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2715 TMR3NotifySuspend(pVM, pVCpu);
2716 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2717 VMMR3FatalDump(pVM, pVCpu, rc);
2718 emR3Debug(pVM, pVCpu, rc);
2719 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2720 /** @todo change the VM state! */
2721 return rc;
2722 }
2723
2724 /* (won't ever get here). */
2725 AssertFailed();
2726}
2727
2728/**
2729 * Notify EM that the VM is being suspended (used by FTM).
2730 *
2731 * @param pVM The cross context VM structure.
2732 */
2733VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
2734{
2735 PVMCPU pVCpu = VMMGetCpu(pVM);
2736
2737 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
2738 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
2739 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2740 return VINF_SUCCESS;
2741}
2742
2743/**
2744 * Notify EM that the VM is being resumed (used by FTM).
2745 *
2746 * @param pVM The cross context VM structure.
2747 */
2748VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
2749{
2750 PVMCPU pVCpu = VMMGetCpu(pVM);
2751 EMSTATE enmCurState = pVCpu->em.s.enmState;
2752
2753 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
2754 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2755 pVCpu->em.s.enmPrevState = enmCurState;
2756 return VINF_SUCCESS;
2757}