VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp @ 70290

Last change on this file since 70290 was 70000, checked in by vboxsync, 7 years ago

VMM: Nested Hw.virt: Make SVM intercept functions smarter. Avoids swapping of modified VMCB state in a lot of
tricky-to-detect situations, and makes it much cleaner that the VMCB is only finally restored just before the
#VMEXIT is done.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 129.8 KB
/* $Id: EM.cpp 70000 2017-12-08 05:57:18Z vboxsync $ */
/** @file
 * EM - Execution Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_em EM - The Execution Monitor / Manager
 *
 * The Execution Monitor/Manager is responsible for running the VM, scheduling
 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
 * Interpreted), and keeping the CPU states in sync. The function
 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
 * emR3RemExecute).
 *
 * The interpreted execution is only used to avoid switching between
 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
 * The interpretation is thus implemented as part of EM.
 *
 * @see grp_em
 */
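
/* Illustrative sketch only (not part of the original file): the outer loop
 * mentioned above boils down to something like
 *
 *     for (;;)
 *     {
 *         // ...process forced actions, then dispatch on the current state...
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_RAW: rc = emR3RawExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_HM:  rc = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
 *             case EMSTATE_REM: rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             // ...halted, suspended, debug and guru states...
 *         }
 *     }
 *
 * See EMR3ExecuteVM() further down in the full file for the authoritative
 * version.
 */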


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#include <VBox/vmm/em.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include <VBox/vmm/apic.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmqueue.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/patm.h>
#include "EMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include "VMMTracing.h"

#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/stream.h>
#include <iprt/thread.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
#define EM_NOTIFY_HM
#endif


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
static const char *emR3GetStateName(EMSTATE enmState);
#endif
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
#if defined(VBOX_WITH_REM) || defined(DEBUG)
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
#endif
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);


/**
 * Initializes the EM.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
{
    LogFlow(("EMR3Init\n"));
    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, em.s, 32);
    AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
    AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));

    /*
     * Init the structure.
     */
    pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
    PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
    PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");

    bool fEnabled;
    int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileUser = !fEnabled;

    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileSupervisor = !fEnabled;

#ifdef VBOX_WITH_RAW_RING1
    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
    AssertLogRelRCReturn(rc, rc);
#else
    pVM->fRawRing1Enabled = false; /* Disabled by default. */
#endif

    rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
    AssertLogRelRCReturn(rc, rc);

    rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
    AssertLogRelRCReturn(rc, rc);
    pVM->em.s.fGuruOnTripleFault = !fEnabled;
    if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
    {
        LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
        pVM->em.s.fGuruOnTripleFault = true;
    }
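
    /* For reference: the CFGM key read above is what the user-visible
     * extradata path maps to, e.g. (illustrative command, not from this file):
     *     VBoxManage setextradata <vmname> "VBoxInternal/EM/TripleFaultReset" 1
     * As enforced above, the override only sticks for single-CPU VMs. */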

    Log(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
         pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));

#ifdef VBOX_WITH_REM
    /*
     * Initialize the REM critical section.
     */
    AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
    rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
    AssertRCReturn(rc, rc);
#endif

    /*
     * Saved state.
     */
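    /* Note: only the save-exec (emR3Save) and load-exec (emR3Load) callbacks
       are supplied below; the NULL arguments are SSMR3RegisterInternal's
       optional live-migration and prepare/done callbacks, which EM does not
       need. */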
    rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
                               NULL, NULL, NULL,
                               NULL, emR3Save, NULL,
                               NULL, emR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

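        /* Only the bootstrap CPU (idCpu 0) starts out schedulable; application
           processors park in EMSTATE_WAIT_SIPI until the guest sends them a
           startup IPI. */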
        pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
        pVCpu->em.s.enmPrevState = EMSTATE_NONE;
        pVCpu->em.s.fForceRAW = false;

        pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
#ifdef VBOX_WITH_RAW_MODE
        if (!HMIsEnabled(pVM))
        {
            pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
            AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
        }
#endif

        /* Force reset of the time slice. */
        pVCpu->em.s.u64TimeSliceStart = 0;

# define EM_REG_COUNTER(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_COUNTER_USED(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE_ADV(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);
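
        /* Note on the helpers above: the trailing 'i' (the per-VCPU loop index)
           is fed to STAMR3RegisterF() as the argument for the "%d" in each
           sample-name template, yielding "/EM/CPU0/...", "/EM/CPU1/..." etc. */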

        /*
         * Statistics.
         */
#ifdef VBOX_WITH_STATISTICS
        PEMSTATS pStats;
        rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->em.s.pStatsR3 = pStats;
        pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
        pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);

        EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
        EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");

        EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
        EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");

        EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
        EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");

        EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
        EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
        EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");

        EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
        pVCpu->em.s.pCliStatTree = 0;

        /* these should be considered for release statistics. */
        EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
        EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHmEntry, "/PROF/CPU%d/EM/HmEnter", "Profiling Hardware Accelerated Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHmExec, "/PROF/CPU%d/EM/HmExec", "Profiling Hardware Accelerated Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");

#endif /* VBOX_WITH_STATISTICS */

        EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
        EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
        EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");

        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
    }

    emR3InitDbg(pVM);
    return VINF_SUCCESS;
}


/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
{
    LogFlow(("EMR3Relocate\n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        if (pVCpu->em.s.pStatsR3)
            pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
    }
}


/**
 * Reset the EM state for a CPU.
 *
 * Called by EMR3Reset and hot plugging.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
{
    /* Reset scheduling state. */
    pVCpu->em.s.fForceRAW = false;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);

    /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
       out of the HALTED state here so that enmPrevState doesn't end up as
       HALTED when EMR3Execute returns. */
    if (pVCpu->em.s.enmState == EMSTATE_HALTED)
    {
        Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
        pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
    }
}


/**
 * Reset notification.
 *
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
{
    Log(("EMR3Reset: \n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        EMR3ResetCpu(&pVM->aCpus[i]);
}


/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM itself being at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
{
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

#ifdef VBOX_WITH_REM
    PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
#else
    RT_NOREF(pVM);
#endif
    return VINF_SUCCESS;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
        AssertRCReturn(rc, rc);

        Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
        Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
        AssertRCReturn(rc, rc);

        /* Save mwait state. */
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}
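
/* Per-VCPU saved-state layout written above (and read back by emR3Load):
 *     bool      fForceRAW
 *     uint32_t  enmPrevState
 *     uint32_t  MWait.fWait
 *     RTGCPTR   uMWaitRAX, uMWaitRCX, uMonitorRAX, uMonitorRCX, uMonitorRDX
 */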


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    /*
     * Validate version.
     */
    if (   uVersion > EM_SAVED_STATE_VERSION
        || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
    {
        AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Load the saved state.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;
        AssertRCReturn(rc, rc);

        if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
        {
            AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
            rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
            AssertRCReturn(rc, rc);
            Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);

            pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
        }
        if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
        {
            /* Load mwait state. */
            rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
            AssertRCReturn(rc, rc);
        }

        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return VINF_SUCCESS;
}
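
/* Note: for saved states at or below EM_SAVED_STATE_VERSION_PRE_MWAIT the
 * MWait block is simply absent and the fields keep their initialization
 * values; likewise enmPrevState only exists in post-PRE_SMP layouts, as the
 * version checks above show. */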


/**
 * Argument packet for emR3SetExecutionPolicy.
 */
struct EMR3SETEXECPOLICYARGS
{
    EMEXECPOLICY    enmPolicy;
    bool            fEnforce;
};


/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
 */
static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /*
     * Only the first CPU changes the variables.
     */
    if (pVCpu->idCpu == 0)
    {
        struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
        switch (pArgs->enmPolicy)
        {
            case EMEXECPOLICY_RECOMPILE_RING0:
                pVM->fRecompileSupervisor = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_RECOMPILE_RING3:
                pVM->fRecompileUser = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_IEM_ALL:
                pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
                break;
            default:
                AssertFailedReturn(VERR_INVALID_PARAMETER);
        }
        Log(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
             pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
    }

    /*
     * Force rescheduling if in RAW, HM, IEM, IEM-then-REM, or REM.
     */
    return pVCpu->em.s.enmState == EMSTATE_RAW
        || pVCpu->em.s.enmState == EMSTATE_HM
        || pVCpu->em.s.enmState == EMSTATE_IEM
        || pVCpu->em.s.enmState == EMSTATE_REM
        || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
         ? VINF_EM_RESCHEDULE
         : VINF_SUCCESS;
}


/**
 * Changes an execution scheduling policy parameter.
 *
 * This is used to enable or disable raw-mode / hardware-virtualization
 * execution of user and supervisor code.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to change.
 * @param   fEnforce    Whether to enforce the policy or not.
 */
VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);

    struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
    return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
}
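
/* Usage sketch (illustrative, not from the original file): force all guest
 * code through IEM on a fully created VM, pUVM coming from e.g. VMR3Create():
 *     int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *     AssertLogRelRC(rc);
 * EMR3QueryExecutionPolicy() below reads the same setting back. */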


/**
 * Queries an execution scheduling policy parameter.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to query.
 * @param   pfEnforced  Where to return the current value.
 */
VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
{
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /* No need to bother EMTs with a query. */
    switch (enmPolicy)
    {
        case EMEXECPOLICY_RECOMPILE_RING0:
            *pfEnforced = pVM->fRecompileSupervisor;
            break;
        case EMEXECPOLICY_RECOMPILE_RING3:
            *pfEnforced = pVM->fRecompileUser;
            break;
        case EMEXECPOLICY_IEM_ALL:
            *pfEnforced = pVM->em.s.fIemExecutesAll;
            break;
        default:
            AssertFailedReturn(VERR_INTERNAL_ERROR_2);
    }

    return VINF_SUCCESS;
}


/**
 * Raise a fatal error.
 *
 * Safely terminate the VM with full state report and stuff. This function
 * will naturally never return.
 *
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
}
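
/* Note: the jump buffer used above is armed by a setjmp() in EMR3ExecuteVM()
 * (later in the full file), which catches the fatal status and produces the
 * guru meditation report; hence this function never returns to its caller. */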


#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
/**
 * Gets the EM state name.
 *
 * @returns Pointer to read-only state name.
 * @param   enmState    The state.
 */
static const char *emR3GetStateName(EMSTATE enmState)
{
    switch (enmState)
    {
        case EMSTATE_NONE:              return "EMSTATE_NONE";
        case EMSTATE_RAW:               return "EMSTATE_RAW";
        case EMSTATE_HM:                return "EMSTATE_HM";
        case EMSTATE_IEM:               return "EMSTATE_IEM";
        case EMSTATE_REM:               return "EMSTATE_REM";
        case EMSTATE_HALTED:            return "EMSTATE_HALTED";
        case EMSTATE_WAIT_SIPI:         return "EMSTATE_WAIT_SIPI";
        case EMSTATE_SUSPENDED:         return "EMSTATE_SUSPENDED";
        case EMSTATE_TERMINATING:       return "EMSTATE_TERMINATING";
        case EMSTATE_DEBUG_GUEST_RAW:   return "EMSTATE_DEBUG_GUEST_RAW";
        case EMSTATE_DEBUG_GUEST_HM:    return "EMSTATE_DEBUG_GUEST_HM";
        case EMSTATE_DEBUG_GUEST_IEM:   return "EMSTATE_DEBUG_GUEST_IEM";
        case EMSTATE_DEBUG_GUEST_REM:   return "EMSTATE_DEBUG_GUEST_REM";
        case EMSTATE_DEBUG_HYPER:       return "EMSTATE_DEBUG_HYPER";
        case EMSTATE_GURU_MEDITATION:   return "EMSTATE_GURU_MEDITATION";
        case EMSTATE_IEM_THEN_REM:      return "EMSTATE_IEM_THEN_REM";
        default:                        return "Unknown!";
    }
}
#endif /* LOG_ENABLED || VBOX_STRICT */


/**
 * Debug loop.
 *
 * @returns VBox status code for EM.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      Current EM VBox status code.
 */
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
        const VBOXSTRICTRC rcLast = rc;

        /*
         * Debug related RC.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (   pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    || pVCpu->em.s.fForceRAW /* paranoia */)
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawStep(pVM, pVCpu);
#else
                    AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
#endif
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
                    rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
#ifdef VBOX_WITH_REM
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
                    rc = emR3RemStep(pVM, pVCpu);
#endif
                else
                {
                    rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
                    if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
                        rc = VINF_EM_DBG_STEPPED;
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_EVENT:
                rc = DBGFR3EventHandlePending(pVM, pVCpu);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                RTLogFlush(NULL);
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;
            case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special error codes! */
            {
                LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
            }
        }

        /*
         * Process the result.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Continue the debugging loop.
             */
            case VINF_EM_DBG_STEP:
            case VINF_EM_DBG_STOP:
            case VINF_EM_DBG_EVENT:
            case VINF_EM_DBG_STEPPED:
            case VINF_EM_DBG_BREAKPOINT:
            case VINF_EM_DBG_HYPER_STEPPED:
            case VINF_EM_DBG_HYPER_BREAKPOINT:
            case VINF_EM_DBG_HYPER_ASSERTION:
                break;

            /*
             * Resuming execution (in some form) has to be done here if we got
             * a hypervisor debug event.
             */
            case VINF_SUCCESS:
            case VINF_EM_RESUME:
            case VINF_EM_SUSPEND:
            case VINF_EM_RESCHEDULE:
            case VINF_EM_RESCHEDULE_RAW:
            case VINF_EM_RESCHEDULE_REM:
            case VINF_EM_HALT:
                if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                {
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawResumeHyper(pVM, pVCpu);
                    if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                        continue;
#else
                    AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
#endif
                }
                if (rc == VINF_SUCCESS)
                    rc = VINF_EM_RESCHEDULE;
                return rc;

            /*
             * The debugger isn't attached.
             * We'll simply turn the thing off since that's the easiest thing to do.
             */
            case VERR_DBGF_NOT_ATTACHED:
                switch (VBOXSTRICTRC_VAL(rcLast))
                {
                    case VINF_EM_DBG_HYPER_STEPPED:
                    case VINF_EM_DBG_HYPER_BREAKPOINT:
                    case VINF_EM_DBG_HYPER_ASSERTION:
                    case VERR_TRPM_PANIC:
                    case VERR_TRPM_DONT_PANIC:
                    case VERR_VMM_RING0_ASSERTION:
                    case VERR_VMM_HYPER_CR3_MISMATCH:
                    case VERR_VMM_RING3_CALL_DISABLED:
                        return rcLast;
                }
                return VINF_EM_OFF;

            /*
             * Status codes terminating the VM in one or another sense.
             */
            case VINF_EM_TERMINATE:
            case VINF_EM_OFF:
            case VINF_EM_RESET:
            case VINF_EM_NO_MEMORY:
            case VINF_EM_RAW_STALE_SELECTOR:
            case VINF_EM_RAW_IRET_TRAP:
            case VERR_TRPM_PANIC:
            case VERR_TRPM_DONT_PANIC:
            case VERR_IEM_INSTR_NOT_IMPLEMENTED:
            case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
            case VERR_VMM_RING0_ASSERTION:
            case VERR_VMM_HYPER_CR3_MISMATCH:
            case VERR_VMM_RING3_CALL_DISABLED:
            case VERR_INTERNAL_ERROR:
            case VERR_INTERNAL_ERROR_2:
            case VERR_INTERNAL_ERROR_3:
            case VERR_INTERNAL_ERROR_4:
            case VERR_INTERNAL_ERROR_5:
            case VERR_IPE_UNEXPECTED_STATUS:
            case VERR_IPE_UNEXPECTED_INFO_STATUS:
            case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                return rc;

            /*
             * The rest is unexpected, and will keep us here.
             */
            default:
                AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
                break;
        }
    } /* debug for ever */
}


#if defined(VBOX_WITH_REM) || defined(DEBUG)
/**
 * Steps recompiled code.
 *
 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
 *          VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
    Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

# ifdef VBOX_WITH_REM
    EMRemLock(pVM);

    /*
     * Switch to REM, step instruction, switch back.
     */
    int rc = REMR3State(pVM, pVCpu);
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);
        REMR3StateBack(pVM, pVCpu);
    }
    EMRemUnlock(pVM);

# else
    int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
# endif

    Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
    return rc;
}
#endif /* VBOX_WITH_REM || DEBUG */


#ifdef VBOX_WITH_REM
/**
 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
 * critical section.
 *
 * @returns false - new fInREMState value.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
{
    STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
    REMR3StateBack(pVM, pVCpu);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);

    EMRemUnlock(pVM);
    return false;
}
#endif


/**
 * Executes recompiled code.
 *
 * This function contains the recompiler version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
 *          VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 *
 */
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
{
#ifdef LOG_ENABLED
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    uint32_t cpl = CPUMGetGuestCPL(pVCpu);

    if (pCtx->eflags.Bits.u1VM)
        Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
    else
        Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
#endif
    STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);

#if defined(VBOX_STRICT) && defined(DEBUG_bird)
    AssertMsg(   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
              || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
              ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
#endif

1089 /*
1090 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1091 * or the REM suggests raw-mode execution.
1092 */
1093 *pfFFDone = false;
1094#ifdef VBOX_WITH_REM
1095 bool fInREMState = false;
1096#else
1097 uint32_t cLoops = 0;
1098#endif
1099 int rc = VINF_SUCCESS;
1100 for (;;)
1101 {
1102#ifdef VBOX_WITH_REM
1103 /*
1104 * Lock REM and update the state if not already in sync.
1105 *
1106 * Note! Big lock, but you are not supposed to own any lock when
1107 * coming in here.
1108 */
1109 if (!fInREMState)
1110 {
1111 EMRemLock(pVM);
1112 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1113
1114 /* Flush the recompiler translation blocks if the VCPU has changed;
1115 also force a full CPU state resync. */
1116 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1117 {
1118 REMFlushTBs(pVM);
1119 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1120 }
1121 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1122
1123 rc = REMR3State(pVM, pVCpu);
1124
1125 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1126 if (RT_FAILURE(rc))
1127 break;
1128 fInREMState = true;
1129
1130 /*
1131 * We might have missed the raising of VMREQ, TIMER and some other
1132 * important FFs while we were busy switching the state. So, check again.
1133 */
1134 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1135 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1136 {
1137 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1138 goto l_REMDoForcedActions;
1139 }
1140 }
1141#endif
1142
1143 /*
1144 * Execute REM.
1145 */
1146 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1147 {
1148 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1149#ifdef VBOX_WITH_REM
1150 rc = REMR3Run(pVM, pVCpu);
1151#else
1152 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
1153#endif
1154 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1155 }
1156 else
1157 {
1158 /* Give up this time slice; virtual time continues */
1159 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1160 RTThreadSleep(5);
1161 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1162 rc = VINF_SUCCESS;
1163 }
1164
1165 /*
1166 * Deal with high priority post execution FFs before doing anything
1167 * else. Sync back the state and leave the lock to be on the safe side.
1168 */
1169 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1170 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1171 {
1172#ifdef VBOX_WITH_REM
1173 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1174#endif
1175 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1176 }
1177
1178 /*
1179 * Process the returned status code.
1180 */
1181 if (rc != VINF_SUCCESS)
1182 {
1183 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1184 break;
1185 if (rc != VINF_REM_INTERRUPED_FF)
1186 {
1187#ifndef VBOX_WITH_REM
1188 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1189 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1190 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1191 {
1192 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1193 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1194 {
1195 rc = VINF_EM_RESCHEDULE;
1196 break;
1197 }
1198 }
1199#endif
1200
1201 /*
1202 * Anything which is not known to us means an internal error
1203 * and the termination of the VM!
1204 */
1205 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1206 break;
1207 }
1208 }
1209
1210
1211 /*
1212 * Check and execute forced actions.
1213 *
1214 * Sync back the VM state and leave the lock before calling any of
1215 * these, you never know what's going to happen here.
1216 */
1217#ifdef VBOX_HIGH_RES_TIMERS_HACK
1218 TMTimerPollVoid(pVM, pVCpu);
1219#endif
1220 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1221 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1222 || VMCPU_FF_IS_PENDING(pVCpu,
1223 VMCPU_FF_ALL_REM_MASK
1224 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1225 {
1226#ifdef VBOX_WITH_REM
1227l_REMDoForcedActions:
1228 if (fInREMState)
1229 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1230#endif
1231 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1232 rc = emR3ForcedActions(pVM, pVCpu, rc);
1233 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1234 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1235 if ( rc != VINF_SUCCESS
1236 && rc != VINF_EM_RESCHEDULE_REM)
1237 {
1238 *pfFFDone = true;
1239 break;
1240 }
1241 }
1242
1243#ifndef VBOX_WITH_REM
1244 /*
1245 * Have to check if we can get back to fast execution mode every so often.
1246 */
1247 if (!(++cLoops & 7))
1248 {
1249 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1250 if ( enmCheck != EMSTATE_REM
1251 && enmCheck != EMSTATE_IEM_THEN_REM)
1252 return VINF_EM_RESCHEDULE;
1253 }
1254#endif
1255
1256 } /* The Inner Loop, recompiled execution mode version. */
1257
1258
1259#ifdef VBOX_WITH_REM
1260 /*
1261 * Returning. Sync back the VM state if required.
1262 */
1263 if (fInREMState)
1264 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1265#endif
1266
1267 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1268 return rc;
1269}
1270
1271
1272#ifdef DEBUG
1273
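/**
 * Single steps recompiled code a number of times for debugging purposes.
 *
 * @returns VINF_EM_RESCHEDULE.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cIterations The number of instructions to step.
 */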
1274int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1275{
1276 EMSTATE enmOldState = pVCpu->em.s.enmState;
1277
1278 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1279
1280 Log(("Single step BEGIN:\n"));
1281 for (uint32_t i = 0; i < cIterations; i++)
1282 {
1283 DBGFR3PrgStep(pVCpu);
1284 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1285 emR3RemStep(pVM, pVCpu);
1286 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1287 break;
1288 }
1289 Log(("Single step END:\n"));
1290 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1291 pVCpu->em.s.enmState = enmOldState;
1292 return VINF_EM_RESCHEDULE;
1293}
1294
1295#endif /* DEBUG */
1296
1297
1298/**
1299 * Try to execute the problematic code in IEM first, then fall back on REM if there
1300 * is too much of it or if IEM doesn't implement something.
1301 *
1302 * @returns Strict VBox status code from IEMExecLots.
1303 * @param pVM The cross context VM structure.
1304 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1305 * @param pfFFDone Force flags done indicator.
1306 *
1307 * @thread EMT(pVCpu)
1308 */
1309static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1310{
1311 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1312 *pfFFDone = false;
1313
1314 /*
1315 * Execute in IEM for a while.
1316 */
1317 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1318 {
1319 uint32_t cInstructions;
1320 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
1321 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1322 if (rcStrict != VINF_SUCCESS)
1323 {
1324 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1325 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1326 break;
1327
1328 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1329 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1330 return rcStrict;
1331 }
1332
1333 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1334 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1335 {
1336 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1337 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1338 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1339 pVCpu->em.s.enmState = enmNewState;
1340 return VINF_SUCCESS;
1341 }
1342
1343 /*
1344 * Check for pending actions.
1345 */
1346 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1347 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1348 return VINF_SUCCESS;
1349 }
1350
1351 /*
1352 * Switch to REM.
1353 */
1354 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1355 pVCpu->em.s.enmState = EMSTATE_REM;
1356 return VINF_SUCCESS;
1357}
1358
1359
1360/**
1361 * Decides whether to execute RAW, HWACC or REM.
1362 *
1363 * @returns new EM state
1364 * @param pVM The cross context VM structure.
1365 * @param pVCpu The cross context virtual CPU structure.
1366 * @param pCtx Pointer to the guest CPU context.
1367 */
1368EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1369{
1370 /*
1371 * When forcing raw-mode execution, things are simple.
1372 */
1373 if (pVCpu->em.s.fForceRAW)
1374 return EMSTATE_RAW;
1375
1376 /*
1377 * We stay in the wait for SIPI state unless explicitly told otherwise.
1378 */
1379 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1380 return EMSTATE_WAIT_SIPI;
1381
1382 /*
1383 * Execute everything in IEM?
1384 */
1385 if (pVM->em.s.fIemExecutesAll)
1386 return EMSTATE_IEM;
1387
1388 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1389 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1390 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1391
1392 X86EFLAGS EFlags = pCtx->eflags;
1393 if (HMIsEnabled(pVM))
1394 {
1395 /*
1396 * Hardware accelerated raw-mode:
1397 */
1398 if ( EMIsHwVirtExecutionEnabled(pVM)
1399 && HMR3CanExecuteGuest(pVM, pCtx))
1400 return EMSTATE_HM;
1401
1402 /*
1403 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1404 * turns off monitoring features essential for raw mode!
1405 */
1406 return EMSTATE_IEM_THEN_REM;
1407 }
1408
1409 /*
1410 * Standard raw-mode:
1411 *
1412 * Here we only support 16 and 32-bit protected mode ring-3 code that has no I/O privileges,
1413 * or 32-bit protected mode ring-0 code.
1414 *
1415 * The tests are ordered by the likelihood of being true during normal execution.
1416 */
1417 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1418 {
1419 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1420 return EMSTATE_REM;
1421 }
1422
1423# ifndef VBOX_RAW_V86
1424 if (EFlags.u32 & X86_EFL_VM) {
1425 Log2(("raw mode refused: VM_MASK\n"));
1426 return EMSTATE_REM;
1427 }
1428# endif
1429
1430 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1431 uint32_t u32CR0 = pCtx->cr0;
1432 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1433 {
1434 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1435 return EMSTATE_REM;
1436 }
1437
1438 if (pCtx->cr4 & X86_CR4_PAE)
1439 {
1440 uint32_t u32Dummy, u32Features;
1441
1442 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1443 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1444 return EMSTATE_REM;
1445 }
1446
1447 unsigned uSS = pCtx->ss.Sel;
1448 if ( pCtx->eflags.Bits.u1VM
1449 || (uSS & X86_SEL_RPL) == 3)
1450 {
1451 if (!EMIsRawRing3Enabled(pVM))
1452 return EMSTATE_REM;
1453
1454 if (!(EFlags.u32 & X86_EFL_IF))
1455 {
1456 Log2(("raw mode refused: IF (RawR3)\n"));
1457 return EMSTATE_REM;
1458 }
1459
1460 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1461 {
1462 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1463 return EMSTATE_REM;
1464 }
1465 }
1466 else
1467 {
1468 if (!EMIsRawRing0Enabled(pVM))
1469 return EMSTATE_REM;
1470
1471 if (EMIsRawRing1Enabled(pVM))
1472 {
1473 /* Only ring 0 and 1 supervisor code. */
1474 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1475 {
1476 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1477 return EMSTATE_REM;
1478 }
1479 }
1480 /* Only ring 0 supervisor code. */
1481 else if ((uSS & X86_SEL_RPL) != 0)
1482 {
1483 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1484 return EMSTATE_REM;
1485 }
1486
1487 // Let's start with pure 32 bits ring 0 code first
1488 /** @todo What's pure 32-bit mode? flat? */
1489 if ( !(pCtx->ss.Attr.n.u1DefBig)
1490 || !(pCtx->cs.Attr.n.u1DefBig))
1491 {
1492 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1493 return EMSTATE_REM;
1494 }
1495
1496 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1497 if (!(u32CR0 & X86_CR0_WP))
1498 {
1499 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1500 return EMSTATE_REM;
1501 }
1502
1503# ifdef VBOX_WITH_RAW_MODE
1504 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1505 {
1506 Log2(("raw r0 mode forced: patch code\n"));
1507# ifdef VBOX_WITH_SAFE_STR
1508 Assert(pCtx->tr.Sel);
1509# endif
1510 return EMSTATE_RAW;
1511 }
1512# endif /* VBOX_WITH_RAW_MODE */
1513
1514# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1515 if (!(EFlags.u32 & X86_EFL_IF))
1516 {
1517 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1518 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1519 return EMSTATE_REM;
1520 }
1521# endif
1522
1523# ifndef VBOX_WITH_RAW_RING1
1524 /** @todo still necessary??? */
1525 if (EFlags.Bits.u2IOPL != 0)
1526 {
1527 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1528 return EMSTATE_REM;
1529 }
1530# endif
1531 }
1532
1533 /*
1534 * Stale hidden selectors mean raw-mode is unsafe (we're being very careful here).
1535 */
1536 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1537 {
1538 Log2(("raw mode refused: stale CS\n"));
1539 return EMSTATE_REM;
1540 }
1541 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1542 {
1543 Log2(("raw mode refused: stale SS\n"));
1544 return EMSTATE_REM;
1545 }
1546 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1547 {
1548 Log2(("raw mode refused: stale DS\n"));
1549 return EMSTATE_REM;
1550 }
1551 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1552 {
1553 Log2(("raw mode refused: stale ES\n"));
1554 return EMSTATE_REM;
1555 }
1556 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1557 {
1558 Log2(("raw mode refused: stale FS\n"));
1559 return EMSTATE_REM;
1560 }
1561 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1562 {
1563 Log2(("raw mode refused: stale GS\n"));
1564 return EMSTATE_REM;
1565 }
1566
1567# ifdef VBOX_WITH_SAFE_STR
1568 if (pCtx->tr.Sel == 0)
1569 {
1570 Log(("Raw mode refused -> TR=0\n"));
1571 return EMSTATE_REM;
1572 }
1573# endif
1574
1575 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1576 return EMSTATE_RAW;
1577}
1578
1579
1580/**
1581 * Executes all high priority post execution force actions.
1582 *
1583 * @returns rc or a fatal status code.
1584 *
1585 * @param pVM The cross context VM structure.
1586 * @param pVCpu The cross context virtual CPU structure.
1587 * @param rc The current rc.
1588 */
1589int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1590{
1591 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1592
1593 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1594 PDMCritSectBothFF(pVCpu);
1595
1596 /* Update CR3 (Nested Paging case for HM). */
1597 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1598 {
1599 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1600 if (RT_FAILURE(rc2))
1601 return rc2;
1602 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1603 }
1604
1605 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1606 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1607 {
1608 if (CPUMIsGuestInPAEMode(pVCpu))
1609 {
1610 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1611 AssertPtr(pPdpes);
1612
1613 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1614 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1615 }
1616 else
1617 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1618 }
1619
1620 /* IEM has pending work (typically memory write after INS instruction). */
1621 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
1622 rc = VBOXSTRICTRC_TODO(IEMR3ProcessForceFlag(pVM, pVCpu, rc));
1623
1624 /* IOM has pending work (committing an I/O or MMIO write). */
1625 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
1626 rc = VBOXSTRICTRC_TODO(IOMR3ProcessForceFlag(pVM, pVCpu, rc));
1627
1628#ifdef VBOX_WITH_RAW_MODE
1629 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1630 CSAMR3DoPendingAction(pVM, pVCpu);
1631#endif
1632
1633 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1634 {
1635 if ( rc > VINF_EM_NO_MEMORY
1636 && rc <= VINF_EM_LAST)
1637 rc = VINF_EM_NO_MEMORY;
1638 }
1639
1640 return rc;
1641}
1642
1643
1644/**
1645 * Executes all pending forced actions.
1646 *
1647 * Forced actions can cause execution delays and execution
1648 * rescheduling. The first we deal with using action priority, so
1649 * that, for instance, pending timers aren't scheduled and run until
1650 * right before execution. The rescheduling we deal with using
1651 * return codes. The same goes for VM termination, only in that case
1652 * we exit everything.
1653 *
1654 * @returns VBox status code of equal or greater importance/severity than rc.
1655 * The most important ones are: VINF_EM_RESCHEDULE,
1656 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1657 *
1658 * @param pVM The cross context VM structure.
1659 * @param pVCpu The cross context virtual CPU structure.
1660 * @param rc The current rc.
1661 *
1662 */
1663int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1664{
1665 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1666#ifdef VBOX_STRICT
1667 int rcIrq = VINF_SUCCESS;
1668#endif
1669 int rc2;
1670#define UPDATE_RC() \
1671 do { \
1672 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1673 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1674 break; \
1675 if (!rc || rc2 < rc) \
1676 rc = rc2; \
1677 } while (0)
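    /* Worked example of the UPDATE_RC() merge semantics (illustrative): with
       rc = VINF_EM_RESCHEDULE, an rc2 of VINF_SUCCESS changes nothing, an rc2
       of VINF_EM_SUSPEND wins because numerically smaller VINF_EM_* codes are
       more important (cf. the rc <= VINF_EM_SUSPEND tests below), and a
       negative rc2 (a real error) always takes over; once rc itself is
       negative it is never overwritten. */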
1678 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1679
1680 /*
1681 * Post execution chunk first.
1682 */
1683 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1684 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1685 {
1686 /*
1687 * EMT Rendezvous (must be serviced before termination).
1688 */
1689 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1690 {
1691 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1692 UPDATE_RC();
1693 /** @todo HACK ALERT! The following test is to make sure EM+TM
1694 * thinks the VM is stopped/reset before the next VM state change
1695 * is made. We need a better solution for this, or at least make it
1696 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1697 * VINF_EM_SUSPEND). */
1698 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1699 {
1700 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1701 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1702 return rc;
1703 }
1704 }
1705
1706 /*
1707 * State change request (cleared by vmR3SetStateLocked).
1708 */
1709 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1710 {
1711 VMSTATE enmState = VMR3GetState(pVM);
1712 switch (enmState)
1713 {
1714 case VMSTATE_FATAL_ERROR:
1715 case VMSTATE_FATAL_ERROR_LS:
1716 case VMSTATE_GURU_MEDITATION:
1717 case VMSTATE_GURU_MEDITATION_LS:
1718 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1719 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1720 return VINF_EM_SUSPEND;
1721
1722 case VMSTATE_DESTROYING:
1723 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1724 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1725 return VINF_EM_TERMINATE;
1726
1727 default:
1728 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1729 }
1730 }
1731
1732 /*
1733 * Debugger Facility polling.
1734 */
1735 if ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
1736 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
1737 {
1738 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1739 UPDATE_RC();
1740 }
1741
1742 /*
1743 * Postponed reset request.
1744 */
1745 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1746 {
1747 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1748 UPDATE_RC();
1749 }
1750
1751#ifdef VBOX_WITH_RAW_MODE
1752 /*
1753 * CSAM page scanning.
1754 */
1755 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1756 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1757 {
1758 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1759
1760 /** @todo check for 16 or 32 bits code! (D bit in the code selector) */
1761 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1762
1763 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
1764 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1765 }
1766#endif
1767
1768 /*
1769 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1770 */
1771 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1772 {
1773 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1774 UPDATE_RC();
1775 if (rc == VINF_EM_NO_MEMORY)
1776 return rc;
1777 }
1778
1779 /* check that we got them all */
1780 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1781 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
1782 }
1783
1784 /*
1785 * Normal priority then.
1786 * (Executed in no particular order.)
1787 */
1788 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1789 {
1790 /*
1791 * PDM Queues are pending.
1792 */
1793 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1794 PDMR3QueueFlushAll(pVM);
1795
1796 /*
1797 * PDM DMA transfers are pending.
1798 */
1799 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1800 PDMR3DmaRun(pVM);
1801
1802 /*
1803 * EMT Rendezvous (make sure they are handled before the requests).
1804 */
1805 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1806 {
1807 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1808 UPDATE_RC();
1809 /** @todo HACK ALERT! The following test is to make sure EM+TM
1810 * thinks the VM is stopped/reset before the next VM state change
1811 * is made. We need a better solution for this, or at least make it
1812 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1813 * VINF_EM_SUSPEND). */
1814 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1815 {
1816 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1817 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1818 return rc;
1819 }
1820 }
1821
1822 /*
1823 * Requests from other threads.
1824 */
1825 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1826 {
1827 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1828 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1829 {
1830 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1831 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1832 return rc2;
1833 }
1834 UPDATE_RC();
1835 /** @todo HACK ALERT! The following test is to make sure EM+TM
1836 * thinks the VM is stopped/reset before the next VM state change
1837 * is made. We need a better solution for this, or at least make it
1838 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1839 * VINF_EM_SUSPEND). */
1840 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1841 {
1842 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1843 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1844 return rc;
1845 }
1846 }
1847
1848#ifdef VBOX_WITH_REM
1849 /* Replay the handler notification changes. */
1850 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1851 {
1852 /* Try not to cause deadlocks. */
1853 if ( pVM->cCpus == 1
1854 || ( !PGMIsLockOwner(pVM)
1855 && !IOMIsLockWriteOwner(pVM))
1856 )
1857 {
1858 EMRemLock(pVM);
1859 REMR3ReplayHandlerNotifications(pVM);
1860 EMRemUnlock(pVM);
1861 }
1862 }
1863#endif
1864
1865 /* check that we got them all */
1866 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1867 }
1868
1869 /*
1870 * Normal priority then. (per-VCPU)
1871 * (Executed in no particular order.)
1872 */
1873 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1874 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1875 {
1876 /*
1877 * Requests from other threads.
1878 */
1879 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
1880 {
1881 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1882 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1883 {
1884 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1885 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1886 return rc2;
1887 }
1888 UPDATE_RC();
1889 /** @todo HACK ALERT! The following test is to make sure EM+TM
1890 * thinks the VM is stopped/reset before the next VM state change
1891 * is made. We need a better solution for this, or at least make it
1892 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1893 * VINF_EM_SUSPEND). */
1894 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1895 {
1896 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1897 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1898 return rc;
1899 }
1900 }
1901
1902 /* check that we got them all */
1903 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
1904 }
1905
1906 /*
1907 * High priority pre execution chunk last.
1908 * (Executed in ascending priority order.)
1909 */
1910 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1911 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1912 {
1913 /*
1914 * Timers before interrupts.
1915 */
1916 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
1917 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1918 TMR3TimerQueuesDo(pVM);
1919
1920 /*
1921 * Pick up asynchronously posted interrupts into the APIC.
1922 */
1923 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
1924 APICUpdatePendingInterrupts(pVCpu);
1925
1926 /*
1927 * The instruction following an emulated STI should *always* be executed!
1928 *
1929 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
1930 * the eip is the same as the inhibited instr address. Before we
1931 * are able to execute this instruction in raw mode (iret to
1932 * guest code) an external interrupt might force a world switch
1933 * again. Possibly allowing a guest interrupt to be dispatched
1934 * in the process. This could break the guest. Sounds very
1935 * unlikely, but such timing-sensitive problems are not as rare as
1936 * you might think.
1937 */
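 /* Concrete example of the race described above (illustrative): the guest
  * executes
  *     sti
  *     hlt
  * The interrupt shadow from STI must survive until the HLT has actually
  * been executed; dispatching an interrupt between the two instructions
  * could make the guest consume its wakeup event early and then sleep in
  * HLT for good. Hence the FF is only cleared once RIP has moved past the
  * inhibited instruction. */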
1938 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1939 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1940 {
1941 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1942 {
1943 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1944 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1945 }
1946 else
1947 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
1948 }
1949
1950 /*
1951 * Interrupts.
1952 */
1953 /** @todo this can be optimized a bit. later. */
1954 bool fWakeupPending = false;
1955 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1956 && (!rc || rc >= VINF_EM_RESCHEDULE_HM))
1957 {
1958 if ( !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1959 && !TRPMHasTrap(pVCpu)) /* an interrupt could already be scheduled for dispatching in the recompiler. */
1960 {
1961 bool fIntrEnabled;
1962 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1963#ifdef VBOX_WITH_RAW_MODE
1964 /* We cannot just inspect EFLAGS when nested hw.virt is enabled (see e.g. CPUMCanSvmNstGstTakePhysIntr). */
1965 fIntrEnabled = !PATMIsPatchGCAddr(pVM, pCtx->eip);
1966#else
1967 fIntrEnabled = true;
1968#endif
1969 /** @todo Can we centralize this under CPUMCanInjectInterrupt()? */
1970#ifdef VBOX_WITH_NESTED_HWVIRT
1971 fIntrEnabled &= pCtx->hwvirt.svm.fGif;
1972 if (fIntrEnabled)
1973 {
1974 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1975 fIntrEnabled = CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx);
1976 else
1977 fIntrEnabled = pCtx->eflags.Bits.u1IF;
1978 }
1979#else
1980 fIntrEnabled &= pCtx->eflags.Bits.u1IF;
1981#endif
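 /* Summary of the gating above (illustrative): GIF=0 blocks interrupt
  * delivery entirely; with GIF=1 a nested guest takes physical interrupts
  * according to the intercept / V_INTR_MASKING rules evaluated by
  * CPUMCanSvmNstGstTakePhysIntr, while outside nested mode plain
  * EFLAGS.IF decides. */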
1982 if (fIntrEnabled)
1983 {
1984 Assert(!HMR3IsEventPending(pVCpu));
1985 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1986 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1987 {
1988#ifdef VBOX_WITH_NESTED_HWVIRT
1989 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
1990 {
1991 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1992 if (RT_SUCCESS(rcStrict))
1993 rc2 = VINF_EM_RESCHEDULE;
1994 else
1995 {
1996 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1997 Log(("EM: SVM Nested-guest INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1998 /** @todo should we call iemInitiateCpuShutdown? Should this
1999 * result in trapping triple-fault intercepts? */
2000 rc2 = VINF_EM_TRIPLE_FAULT;
2001 }
2002 }
2003 else
2004#endif
2005 {
2006 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
2007 /** @todo this really isn't nice, should properly handle this */
2008 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
2009 if (pVM->em.s.fIemExecutesAll && ( rc2 == VINF_EM_RESCHEDULE_REM
2010 || rc2 == VINF_EM_RESCHEDULE_HM
2011 || rc2 == VINF_EM_RESCHEDULE_RAW))
2012 rc2 = VINF_EM_RESCHEDULE;
2013#ifdef VBOX_STRICT
2014 rcIrq = rc2;
2015#endif
2016 }
2017 UPDATE_RC();
2018 /* Reschedule required: We must not miss the wakeup below! */
2019 fWakeupPending = true;
2020 }
2021#ifdef VBOX_WITH_NESTED_HWVIRT
2022 else if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST))
2023 {
2024 /*
2025 * Check nested-guest virtual interrupts.
2026 */
2027 if (CPUMCanSvmNstGstTakeVirtIntr(pCtx))
2028 {
2029 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
2030 {
2031 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
2032 if (RT_SUCCESS(rcStrict))
2033 rc2 = VINF_EM_RESCHEDULE;
2034 else
2035 {
2036 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2037 Log(("EM: SVM Nested-guest VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
2038 /** @todo should we call iemInitiateCpuShutdown? Should this
2039 * result in trapping triple-fault intercepts? */
2040 rc2 = VINF_EM_TRIPLE_FAULT;
2041 }
2042 }
2043 else
2044 {
2045 /*
2046 * Prepare the nested-guest interrupt for injection.
2047 */
2048 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
2049 uint8_t uNstGstVector = CPUMGetSvmNstGstInterrupt(pCtx);
2050 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
2051 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
2052 /** @todo reschedule to HM/REM later, when the HMR0 nested-guest execution is
2053 * done. For now just reschedule to IEM. */
2054 rc2 = VINF_EM_RESCHEDULE;
2055 }
2056 UPDATE_RC();
2057 /* Reschedule required: We must not miss the wakeup below! */
2058 fWakeupPending = true;
2059 }
2060 }
2061#endif /* VBOX_WITH_NESTED_HWVIRT */
2062 }
2063 }
2064 }
2065
2066 /*
2067 * Allocate handy pages.
2068 */
2069 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2070 {
2071 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2072 UPDATE_RC();
2073 }
2074
2075 /*
2076 * Debugger Facility request.
2077 */
2078 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2079 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2080 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
2081 {
2082 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2083 UPDATE_RC();
2084 }
2085
2086 /*
2087 * EMT Rendezvous (must be serviced before termination).
2088 */
2089 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2090 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2091 {
2092 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2093 UPDATE_RC();
2094 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2095 * stopped/reset before the next VM state change is made. We need a better
2096 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2097 * && rc <= VINF_EM_SUSPEND). */
2098 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2099 {
2100 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2101 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2102 return rc;
2103 }
2104 }
2105
2106 /*
2107 * State change request (cleared by vmR3SetStateLocked).
2108 */
2109 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2110 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2111 {
2112 VMSTATE enmState = VMR3GetState(pVM);
2113 switch (enmState)
2114 {
2115 case VMSTATE_FATAL_ERROR:
2116 case VMSTATE_FATAL_ERROR_LS:
2117 case VMSTATE_GURU_MEDITATION:
2118 case VMSTATE_GURU_MEDITATION_LS:
2119 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2120 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2121 return VINF_EM_SUSPEND;
2122
2123 case VMSTATE_DESTROYING:
2124 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2125 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2126 return VINF_EM_TERMINATE;
2127
2128 default:
2129 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2130 }
2131 }
2132
2133 /*
2134 * Out of memory? Since most of our fellow high priority actions may cause us
2135 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2136 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2137 * than us since we can terminate without allocating more memory.
2138 */
2139 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2140 {
2141 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2142 UPDATE_RC();
2143 if (rc == VINF_EM_NO_MEMORY)
2144 return rc;
2145 }
2146
2147 /*
2148 * If the virtual sync clock is still stopped, make TM restart it.
2149 */
2150 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2151 TMR3VirtualSyncFF(pVM, pVCpu);
2152
2153#ifdef DEBUG
2154 /*
2155 * Debug, pause the VM.
2156 */
2157 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2158 {
2159 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2160 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2161 return VINF_EM_SUSPEND;
2162 }
2163#endif
2164
2165 /* check that we got them all */
2166 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2167 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2168 }
2169
2170#undef UPDATE_RC
2171 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2172 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2173 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2174 return rc;
2175}
2176
2177
2178/**
2179 * Check whether the preset execution time cap still allows guest execution in the current time slice.
2180 *
2181 * @returns true if allowed, false otherwise
2182 * @param pVM The cross context VM structure.
2183 * @param pVCpu The cross context virtual CPU structure.
2184 */
2185bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2186{
2187 uint64_t u64UserTime, u64KernelTime;
2188
2189 if ( pVM->uCpuExecutionCap != 100
2190 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2191 {
2192 uint64_t u64TimeNow = RTTimeMilliTS();
2193 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2194 {
2195 /* New time slice. */
2196 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2197 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2198 pVCpu->em.s.u64TimeSliceExec = 0;
2199 }
2200 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2201
2202 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2203 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2204 return false;
2205 }
2206 return true;
2207}
2208
2209
2210/**
2211 * Execute VM.
2212 *
2213 * This function is the main loop of the VM. The emulation thread
2214 * calls this function when the VM has been successfully constructed
2215 * and we're ready to execute the VM.
2216 *
2217 * Returning from this function means that the VM is turned off or
2218 * suspended (state already saved) and deconstruction is next in line.
2219 *
2220 * All interaction from other threads is done using forced actions
2221 * and signaling of the wait object.
2222 *
2223 * @returns VBox status code; informational status codes may indicate failure.
2224 * @param pVM The cross context VM structure.
2225 * @param pVCpu The cross context virtual CPU structure.
2226 */
2227VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2228{
2229 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2230 pVM,
2231 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2232 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2233 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2234 pVCpu->em.s.fForceRAW));
2235 VM_ASSERT_EMT(pVM);
2236 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2237 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2238 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2239 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2240
2241 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2242 if (rc == 0)
2243 {
2244 /*
2245 * Start the virtual time.
2246 */
2247 TMR3NotifyResume(pVM, pVCpu);
2248
2249 /*
2250 * The Outer Main Loop.
2251 */
2252 bool fFFDone = false;
2253
2254 /* Reschedule right away to start in the right state. */
2255 rc = VINF_SUCCESS;
2256
2257 /* If resuming after a pause or a state load, restore the previous
2258 state, or else we'll start executing code. Otherwise, just reschedule. */
2259 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2260 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2261 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2262 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2263 else
2264 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2265 pVCpu->em.s.cIemThenRemInstructions = 0;
2266 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2267
2268 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2269 for (;;)
2270 {
2271 /*
2272 * Before we can schedule anything (we're here because
2273 * scheduling is required) we must service any pending
2274 * forced actions to avoid any pending action causing
2275 * immediate rescheduling upon entering an inner loop
2276 *
2277 * Do forced actions.
2278 */
2279 if ( !fFFDone
2280 && RT_SUCCESS(rc)
2281 && rc != VINF_EM_TERMINATE
2282 && rc != VINF_EM_OFF
2283 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2284 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2285 {
2286 rc = emR3ForcedActions(pVM, pVCpu, rc);
2287 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2288 if ( ( rc == VINF_EM_RESCHEDULE_REM
2289 || rc == VINF_EM_RESCHEDULE_HM)
2290 && pVCpu->em.s.fForceRAW)
2291 rc = VINF_EM_RESCHEDULE_RAW;
2292 }
2293 else if (fFFDone)
2294 fFFDone = false;
2295
2296 /*
2297 * Now what to do?
2298 */
2299 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2300 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2301 switch (rc)
2302 {
2303 /*
2304 * Keep doing what we're currently doing.
2305 */
2306 case VINF_SUCCESS:
2307 break;
2308
2309 /*
2310 * Reschedule - to raw-mode execution.
2311 */
2312 case VINF_EM_RESCHEDULE_RAW:
2313 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2314 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2315 pVCpu->em.s.enmState = EMSTATE_RAW;
2316 break;
2317
2318 /*
2319 * Reschedule - to hardware accelerated raw-mode execution.
2320 */
2321 case VINF_EM_RESCHEDULE_HM:
2322 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2323 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2324 Assert(!pVCpu->em.s.fForceRAW);
2325 pVCpu->em.s.enmState = EMSTATE_HM;
2326 break;
2327
2328 /*
2329 * Reschedule - to recompiled execution.
2330 */
2331 case VINF_EM_RESCHEDULE_REM:
2332 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2333 if (HMIsEnabled(pVM))
2334 {
2335 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2336 enmOldState, EMSTATE_IEM_THEN_REM));
2337 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2338 {
2339 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2340 pVCpu->em.s.cIemThenRemInstructions = 0;
2341 }
2342 }
2343 else
2344 {
2345 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2346 pVCpu->em.s.enmState = EMSTATE_REM;
2347 }
2348 break;
2349
2350 /*
2351 * Resume.
2352 */
2353 case VINF_EM_RESUME:
2354 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2355 /* Don't reschedule in the halted or wait for SIPI case. */
2356 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2357 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2358 {
2359 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2360 break;
2361 }
2362 /* fall through and get scheduled. */
2363 RT_FALL_THRU();
2364
2365 /*
2366 * Reschedule.
2367 */
2368 case VINF_EM_RESCHEDULE:
2369 {
2370 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2371 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2372 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2373 pVCpu->em.s.cIemThenRemInstructions = 0;
2374 pVCpu->em.s.enmState = enmState;
2375 break;
2376 }
2377
2378 /*
2379 * Halted.
2380 */
2381 case VINF_EM_HALT:
2382 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2383 pVCpu->em.s.enmState = EMSTATE_HALTED;
2384 break;
2385
2386 /*
2387 * Switch to the wait for SIPI state (application processor only)
2388 */
2389 case VINF_EM_WAIT_SIPI:
2390 Assert(pVCpu->idCpu != 0);
2391 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2392 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2393 break;
2394
2395
2396 /*
2397 * Suspend.
2398 */
2399 case VINF_EM_SUSPEND:
2400 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2401 Assert(enmOldState != EMSTATE_SUSPENDED);
2402 pVCpu->em.s.enmPrevState = enmOldState;
2403 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2404 break;
2405
2406 /*
2407 * Reset.
2408 * We might end up doing a double reset for now, we'll have to clean up the mess later.
2409 */
2410 case VINF_EM_RESET:
2411 {
2412 if (pVCpu->idCpu == 0)
2413 {
2414 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2415 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2416 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2417 pVCpu->em.s.cIemThenRemInstructions = 0;
2418 pVCpu->em.s.enmState = enmState;
2419 }
2420 else
2421 {
2422 /* All other VCPUs go into the wait for SIPI state. */
2423 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2424 }
2425 break;
2426 }
2427
2428 /*
2429 * Power Off.
2430 */
2431 case VINF_EM_OFF:
2432 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2433 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2434 TMR3NotifySuspend(pVM, pVCpu);
2435 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2436 return rc;
2437
2438 /*
2439 * Terminate the VM.
2440 */
2441 case VINF_EM_TERMINATE:
2442 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2443 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2444 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2445 TMR3NotifySuspend(pVM, pVCpu);
2446 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2447 return rc;
2448
2449
2450 /*
2451 * Out of memory, suspend the VM and stuff.
2452 */
2453 case VINF_EM_NO_MEMORY:
2454 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2455 Assert(enmOldState != EMSTATE_SUSPENDED);
2456 pVCpu->em.s.enmPrevState = enmOldState;
2457 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2458 TMR3NotifySuspend(pVM, pVCpu);
2459 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2460
2461 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2462 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2463 if (rc != VINF_EM_SUSPEND)
2464 {
2465 if (RT_SUCCESS_NP(rc))
2466 {
2467 AssertLogRelMsgFailed(("%Rrc\n", rc));
2468 rc = VERR_EM_INTERNAL_ERROR;
2469 }
2470 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2471 }
2472 return rc;
2473
2474 /*
2475 * Guest debug events.
2476 */
2477 case VINF_EM_DBG_STEPPED:
2478 case VINF_EM_DBG_STOP:
2479 case VINF_EM_DBG_EVENT:
2480 case VINF_EM_DBG_BREAKPOINT:
2481 case VINF_EM_DBG_STEP:
2482 if (enmOldState == EMSTATE_RAW)
2483 {
2484 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2485 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2486 }
2487 else if (enmOldState == EMSTATE_HM)
2488 {
2489 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2490 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2491 }
2492 else if (enmOldState == EMSTATE_REM)
2493 {
2494 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2495 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2496 }
2497 else
2498 {
2499 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2500 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2501 }
2502 break;
2503
2504 /*
2505 * Hypervisor debug events.
2506 */
2507 case VINF_EM_DBG_HYPER_STEPPED:
2508 case VINF_EM_DBG_HYPER_BREAKPOINT:
2509 case VINF_EM_DBG_HYPER_ASSERTION:
2510 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2511 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2512 break;
2513
2514 /*
2515 * Triple fault.
2516 */
2517 case VINF_EM_TRIPLE_FAULT:
2518 if (!pVM->em.s.fGuruOnTripleFault)
2519 {
2520 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2521 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2522 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2523 continue;
2524 }
2525 /* Else fall through and trigger a guru. */
2526 RT_FALL_THRU();
2527
2528 case VERR_VMM_RING0_ASSERTION:
2529 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2530 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2531 break;
2532
2533 /*
2534 * Any error code showing up here other than the ones we
2535 * know and process above are considered to be FATAL.
2536 *
2537 * Unknown warnings and informational status codes are also
2538 * included in this.
2539 */
2540 default:
2541 if (RT_SUCCESS_NP(rc))
2542 {
2543 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2544 rc = VERR_EM_INTERNAL_ERROR;
2545 }
2546 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2547 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2548 break;
2549 }
2550
2551 /*
2552 * Act on state transition.
2553 */
2554 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2555 if (enmOldState != enmNewState)
2556 {
2557 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2558
2559 /* Clear MWait flags and the unhalt FF. */
2560 if ( enmOldState == EMSTATE_HALTED
2561 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2562 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2563 && ( enmNewState == EMSTATE_RAW
2564 || enmNewState == EMSTATE_HM
2565 || enmNewState == EMSTATE_REM
2566 || enmNewState == EMSTATE_IEM_THEN_REM
2567 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2568 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2569 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2570 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2571 {
2572 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2573 {
2574 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2575 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2576 }
2577 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2578 {
2579 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2580 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2581 }
2582 }
2583 }
2584 else
2585 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2586
2587 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2588 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2589
2590 /*
2591 * Act on the new state.
2592 */
2593 switch (enmNewState)
2594 {
2595 /*
2596 * Execute raw.
2597 */
2598 case EMSTATE_RAW:
2599#ifdef VBOX_WITH_RAW_MODE
2600 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2601#else
2602 AssertLogRelMsgFailed(("%Rrc\n", rc));
2603 rc = VERR_EM_INTERNAL_ERROR;
2604#endif
2605 break;
2606
2607 /*
2608 * Execute hardware accelerated raw.
2609 */
2610 case EMSTATE_HM:
2611 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2612 break;
2613
2614 /*
2615 * Execute recompiled.
2616 */
2617 case EMSTATE_REM:
2618 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2619 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2620 break;
2621
2622 /*
2623 * Execute in the interpreter.
2624 */
2625 case EMSTATE_IEM:
2626 {
2627#if 0 /* For testing purposes. */
2628 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2629 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2630 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2631 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2632 rc = VINF_SUCCESS;
2633 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2634#endif
2635 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
2636 if (pVM->em.s.fIemExecutesAll)
2637 {
2638 Assert(rc != VINF_EM_RESCHEDULE_REM);
2639 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2640 Assert(rc != VINF_EM_RESCHEDULE_HM);
2641 }
2642 fFFDone = false;
2643 break;
2644 }
2645
2646 /*
2647 * Execute in IEM, hoping we can quickly switch back to HM
2648 * or RAW execution. If our hopes fail, we go to REM.
2649 */
2650 case EMSTATE_IEM_THEN_REM:
2651 {
2652 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2653 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2654 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2655 break;
2656 }
2657
2658 /*
2659 * Application processor execution halted until SIPI.
2660 */
2661 case EMSTATE_WAIT_SIPI:
2662 /* no break */
2663 /*
2664 * hlt - execution halted until interrupt.
2665 */
2666 case EMSTATE_HALTED:
2667 {
2668 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2669 /* If HM (or someone else) stores a pending interrupt in
2670 TRPM, it must be dispatched ASAP without any halting.
2671 Anything pending in TRPM has been accepted and the CPU
2672 should already be in the right state to receive it. */
2673 if (TRPMHasTrap(pVCpu))
2674 rc = VINF_EM_RESCHEDULE;
2675 /* MWAIT has a special extension where it's woken up when
2676 an interrupt is pending even when IF=0. */
2677 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2678 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2679 {
2680 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2681 if (rc == VINF_SUCCESS)
2682 {
2683 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2684 APICUpdatePendingInterrupts(pVCpu);
2685
2686 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2687 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2688 {
2689 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2690 rc = VINF_EM_RESCHEDULE;
2691 }
2692 }
2693 }
2694 else
2695 {
2696 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2697 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2698 check VMCPU_FF_UPDATE_APIC here. */
2699 if ( rc == VINF_SUCCESS
2700 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2701 {
2702 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2703 rc = VINF_EM_RESCHEDULE;
2704 }
2705 }
2706
2707 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2708 break;
2709 }
2710
2711 /*
2712 * Suspended - return to VM.cpp.
2713 */
2714 case EMSTATE_SUSPENDED:
2715 TMR3NotifySuspend(pVM, pVCpu);
2716 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2717 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2718 return VINF_EM_SUSPEND;
2719
2720 /*
2721 * Debugging in the guest.
2722 */
2723 case EMSTATE_DEBUG_GUEST_RAW:
2724 case EMSTATE_DEBUG_GUEST_HM:
2725 case EMSTATE_DEBUG_GUEST_IEM:
2726 case EMSTATE_DEBUG_GUEST_REM:
2727 TMR3NotifySuspend(pVM, pVCpu);
2728 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2729 TMR3NotifyResume(pVM, pVCpu);
2730 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2731 break;
2732
2733 /*
2734 * Debugging in the hypervisor.
2735 */
2736 case EMSTATE_DEBUG_HYPER:
2737 {
2738 TMR3NotifySuspend(pVM, pVCpu);
2739 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2740
2741 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2742 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2743 if (rc != VINF_SUCCESS)
2744 {
2745 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2746 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2747 else
2748 {
2749 /* switch to guru meditation mode */
2750 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2751 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2752 VMMR3FatalDump(pVM, pVCpu, rc);
2753 }
2754 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2755 return rc;
2756 }
2757
2758 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2759 TMR3NotifyResume(pVM, pVCpu);
2760 break;
2761 }
2762
2763 /*
2764 * Guru meditation takes place in the debugger.
2765 */
2766 case EMSTATE_GURU_MEDITATION:
2767 {
2768 TMR3NotifySuspend(pVM, pVCpu);
2769 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2770 VMMR3FatalDump(pVM, pVCpu, rc);
2771 emR3Debug(pVM, pVCpu, rc);
2772 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2773 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2774 return rc;
2775 }
2776
2777 /*
2778 * The states we don't expect here.
2779 */
2780 case EMSTATE_NONE:
2781 case EMSTATE_TERMINATING:
2782 default:
2783 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2784 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2785 TMR3NotifySuspend(pVM, pVCpu);
2786 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2787 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2788 return VERR_EM_INTERNAL_ERROR;
2789 }
2790 } /* The Outer Main Loop */
2791 }
2792 else
2793 {
2794 /*
2795 * Fatal error.
2796 */
2797 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2798 TMR3NotifySuspend(pVM, pVCpu);
2799 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2800 VMMR3FatalDump(pVM, pVCpu, rc);
2801 emR3Debug(pVM, pVCpu, rc);
2802 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2803 /** @todo change the VM state! */
2804 return rc;
2805 }
2806
2807 /* not reached */
2808}
2809
2810/**
2811 * Notify EM of a suspend state change (used by FTM).
2812 *
2813 * @param pVM The cross context VM structure.
2814 */
2815VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
2816{
2817 PVMCPU pVCpu = VMMGetCpu(pVM);
2818
2819 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
2820 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
2821 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2822 return VINF_SUCCESS;
2823}
2824
2825/**
2826 * Notify EM of a resume state change (used by FTM).
2827 *
2828 * @param pVM The cross context VM structure.
2829 */
2830VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
2831{
2832 PVMCPU pVCpu = VMMGetCpu(pVM);
2833 EMSTATE enmCurState = pVCpu->em.s.enmState;
2834
2835 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
2836 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2837 pVCpu->em.s.enmPrevState = enmCurState;
2838 return VINF_SUCCESS;
2839}