VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@70979

Last change on this file since 70979 was 70979, checked in by vboxsync, 7 years ago

NEM: Working on the EM loops. bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 133.2 KB
1/* $Id: EM.cpp 70979 2018-02-13 01:38:48Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
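/* Editor's note: a minimal sketch of the dispatch described above -- not
 * VirtualBox code. EMR3ExecuteVM() also processes forced actions, status
 * codes and debugging, all omitted here; the inner-loop signatures are
 * assumed to mirror emR3RemExecute() further down in this file.
 */
#if 0 /* illustrative only */
static int emOuterLoopSketch(PVM pVM, PVMCPU pVCpu)
{
    for (;;)
    {
        bool fFFDone = false;
        int  rc;
        switch (pVCpu->em.s.enmState)
        {
            case EMSTATE_RAW: rc = emR3RawExecute(pVM, pVCpu, &fFFDone); break;
            case EMSTATE_HM:  rc = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
            case EMSTATE_REM: rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
            default:          return VINF_EM_TERMINATE;
        }
        /* ...evaluate rc and pending FFs, possibly switching enmState... */
    }
}
#endif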
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#include <VBox/vmm/em.h>
40#include <VBox/vmm/vmm.h>
41#include <VBox/vmm/patm.h>
42#include <VBox/vmm/csam.h>
43#include <VBox/vmm/selm.h>
44#include <VBox/vmm/trpm.h>
45#include <VBox/vmm/iem.h>
46#include <VBox/vmm/iom.h>
47#include <VBox/vmm/dbgf.h>
48#include <VBox/vmm/pgm.h>
49#ifdef VBOX_WITH_REM
50# include <VBox/vmm/rem.h>
51#endif
52#include <VBox/vmm/apic.h>
53#include <VBox/vmm/tm.h>
54#include <VBox/vmm/mm.h>
55#include <VBox/vmm/ssm.h>
56#include <VBox/vmm/pdmapi.h>
57#include <VBox/vmm/pdmcritsect.h>
58#include <VBox/vmm/pdmqueue.h>
59#include <VBox/vmm/hm.h>
60#include <VBox/vmm/patm.h>
61#include "EMInternal.h"
62#include <VBox/vmm/vm.h>
63#include <VBox/vmm/uvm.h>
64#include <VBox/vmm/cpumdis.h>
65#include <VBox/dis.h>
66#include <VBox/disopcode.h>
67#include "VMMTracing.h"
68
69#include <iprt/asm.h>
70#include <iprt/string.h>
71#include <iprt/stream.h>
72#include <iprt/thread.h>
73
74
75/*********************************************************************************************************************************
76* Defined Constants And Macros *
77*********************************************************************************************************************************/
78#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
79#define EM_NOTIFY_HM
80#endif
81
82
83/*********************************************************************************************************************************
84* Internal Functions *
85*********************************************************************************************************************************/
86static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
87static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
88#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
89static const char *emR3GetStateName(EMSTATE enmState);
90#endif
91static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
92#if defined(VBOX_WITH_REM) || defined(DEBUG)
93static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
94#endif
95static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
96int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
97
98
99/**
100 * Initializes the EM.
101 *
102 * @returns VBox status code.
103 * @param pVM The cross context VM structure.
104 */
105VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
106{
107 LogFlow(("EMR3Init\n"));
108 /*
109 * Assert alignment and sizes.
110 */
111 AssertCompileMemberAlignment(VM, em.s, 32);
112 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
113 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
114
115 /*
116 * Init the structure.
117 */
118 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
119 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
120 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
121
122 bool fEnabled;
123 int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
124 AssertLogRelRCReturn(rc, rc);
125 pVM->fRecompileUser = !fEnabled;
126
127 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
128 AssertLogRelRCReturn(rc, rc);
129 pVM->fRecompileSupervisor = !fEnabled;
130
131#ifdef VBOX_WITH_RAW_RING1
132 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
133 AssertLogRelRCReturn(rc, rc);
134#else
135 pVM->fRawRing1Enabled = false; /* Disabled by default. */
136#endif
137
138 rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
139 AssertLogRelRCReturn(rc, rc);
140
141 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
142 AssertLogRelRCReturn(rc, rc);
143 pVM->em.s.fGuruOnTripleFault = !fEnabled;
144 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
145 {
146 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
147 pVM->em.s.fGuruOnTripleFault = true;
148 }
149
150 LogRel(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
151 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
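/* Editor's note: the keys above are CFGM configuration values. Assuming the
 * standard VBoxInternal extradata-to-CFGM mapping, the /EM/ ones can be set
 * from the host shell, e.g. (hypothetical VM name):
 *     VBoxManage setextradata "MyVM" "VBoxInternal/EM/IemExecutesAll" 1
 */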
152
153#ifdef VBOX_WITH_REM
154 /*
155 * Initialize the REM critical section.
156 */
157 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
158 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
159 AssertRCReturn(rc, rc);
160#endif
161
162 /*
163 * Saved state.
164 */
165 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
166 NULL, NULL, NULL,
167 NULL, emR3Save, NULL,
168 NULL, emR3Load, NULL);
169 if (RT_FAILURE(rc))
170 return rc;
171
172 for (VMCPUID i = 0; i < pVM->cCpus; i++)
173 {
174 PVMCPU pVCpu = &pVM->aCpus[i];
175
176 pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
177 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
178 pVCpu->em.s.fForceRAW = false;
179
180 pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
181#ifdef VBOX_WITH_RAW_MODE
182 if (VM_IS_RAW_MODE_ENABLED(pVM))
183 {
184 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
185 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
186 }
187#endif
188
189 /* Force reset of the time slice. */
190 pVCpu->em.s.u64TimeSliceStart = 0;
191
192# define EM_REG_COUNTER(a, b, c) \
193 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
194 AssertRC(rc);
195
196# define EM_REG_COUNTER_USED(a, b, c) \
197 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
198 AssertRC(rc);
199
200# define EM_REG_PROFILE(a, b, c) \
201 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
202 AssertRC(rc);
203
204# define EM_REG_PROFILE_ADV(a, b, c) \
205 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
206 AssertRC(rc);
207
208 /*
209 * Statistics.
210 */
211#ifdef VBOX_WITH_STATISTICS
212 PEMSTATS pStats;
213 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
214 if (RT_FAILURE(rc))
215 return rc;
216
217 pVCpu->em.s.pStatsR3 = pStats;
218 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
219 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
220
221 EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
222 EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");
223
224 EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
225 EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");
226
227 EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
228 EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
229 EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
230 EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
231 EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
232 EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
233 EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
234 EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
235 EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
236 EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
237 EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
238 EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
239 EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
240 EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
241 EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
242 EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
243 EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
244 EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
245 EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
246 EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
247 EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
248 EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
249 EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
250 EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
251 EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
252 EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
253 EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
254 EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
255 EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
256 EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
257 EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
258 EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
259 EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
260 EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
261 EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
262 EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
263 EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
264 EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
265 EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
266 EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
267 EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
268 EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
269 EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
270 EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
271 EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
272 EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
273 EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
274 EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
275 EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
276 EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
277 EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
278 EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
279 EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
280 EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
281 EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
282 EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
283 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
284 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
285 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
286 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
287 EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
288 EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
289 EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
290 EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
291 EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
292 EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
293 EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
294 EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
295 EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
296 EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
297 EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
298 EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
299 EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
300 EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
301
302 EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
303 EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");
304
305 EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
306 EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
307 EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
308 EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
309 EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
310 EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
311 EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
312 EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
313 EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
314 EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
315 EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
316 EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
317 EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
318 EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
319 EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
320 EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
321 EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
322 EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
323 EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
324 EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
325 EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
326 EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
327 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
328 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
329 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
330 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
331 EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
332 EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
333 EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
334 EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
335 EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
336 EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
337 EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
338 EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
339 EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
340 EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
341 EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
342 EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
343 EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
344 EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
345 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
346 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
347 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
348 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
349 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
350 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
351 EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
352 EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
353 EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
354 EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
355 EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
356 EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
357
358 EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
359 EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
360 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
361 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
362 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
363 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
364 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
365 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
366 EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
367 EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
368 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
369 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
370 EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
371 EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
372 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
373 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
374 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
375 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
376 EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
377 EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
378 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
379 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
380 EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
381 EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
382 EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
383 EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
384 EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
385 EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
386
387 EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
388 EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
389 EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");
390 EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");
391
392 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
393 EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions sent to IEM in ring-3.");
394 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
395 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
396 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
397 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
398 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
399 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
400 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
401 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
402 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
403 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
404 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
405 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
406 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
407 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
408 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
409 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
410 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
411 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
412 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
413 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
414 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
415 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
416 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
417 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
418
419 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
420 pVCpu->em.s.pCliStatTree = 0;
421
422 /* these should be considered for release statistics. */
423 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
424 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
425 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%d/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
426 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%d/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
427 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%d/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
428 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
429 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
430 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%d/EM/NEMEnter", "Profiling NEM entry overhead.");
431 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%d/EM/NEMExec", "Profiling NEM execution.");
432 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%d/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
433 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
434 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
435 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
436 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
437 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
438 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
439
440#endif /* VBOX_WITH_STATISTICS */
441
442 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
443 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
444 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
445 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
446 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
447
448 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
449 }
450
451 emR3InitDbg(pVM);
452 return VINF_SUCCESS;
453}
454
455
456/**
457 * Applies relocations to data and code managed by this
458 * component. This function will be called at init and
459 * whenever the VMM needs to relocate itself inside the GC.
460 *
461 * @param pVM The cross context VM structure.
462 */
463VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
464{
465 LogFlow(("EMR3Relocate\n"));
466 for (VMCPUID i = 0; i < pVM->cCpus; i++)
467 {
468 PVMCPU pVCpu = &pVM->aCpus[i];
469 if (pVCpu->em.s.pStatsR3)
470 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
471 }
472}
473
474
475/**
476 * Reset the EM state for a CPU.
477 *
478 * Called by EMR3Reset and hot plugging.
479 *
480 * @param pVCpu The cross context virtual CPU structure.
481 */
482VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
483{
484 /* Reset scheduling state. */
485 pVCpu->em.s.fForceRAW = false;
486 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
487
488 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
489 out of the HALTED state here so that enmPrevState doesn't end up as
490 HALTED when EMR3Execute returns. */
491 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
492 {
493 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
494 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
495 }
496}
497
498
499/**
500 * Reset notification.
501 *
502 * @param pVM The cross context VM structure.
503 */
504VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
505{
506 Log(("EMR3Reset: \n"));
507 for (VMCPUID i = 0; i < pVM->cCpus; i++)
508 EMR3ResetCpu(&pVM->aCpus[i]);
509}
510
511
512/**
513 * Terminates the EM.
514 *
515 * Termination means cleaning up and freeing all resources; the VM
516 * itself is at this point powered off or suspended.
517 *
518 * @returns VBox status code.
519 * @param pVM The cross context VM structure.
520 */
521VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
522{
523 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
524
525#ifdef VBOX_WITH_REM
526 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
527#else
528 RT_NOREF(pVM);
529#endif
530 return VINF_SUCCESS;
531}
532
533
534/**
535 * Execute state save operation.
536 *
537 * @returns VBox status code.
538 * @param pVM The cross context VM structure.
539 * @param pSSM SSM operation handle.
540 */
541static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
542{
543 for (VMCPUID i = 0; i < pVM->cCpus; i++)
544 {
545 PVMCPU pVCpu = &pVM->aCpus[i];
546
547 int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
548 AssertRCReturn(rc, rc);
549
550 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
551 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
552 rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
553 AssertRCReturn(rc, rc);
554
555 /* Save mwait state. */
556 rc = SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
557 AssertRCReturn(rc, rc);
558 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
559 AssertRCReturn(rc, rc);
560 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
561 AssertRCReturn(rc, rc);
562 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
563 AssertRCReturn(rc, rc);
564 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
565 AssertRCReturn(rc, rc);
566 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
567 AssertRCReturn(rc, rc);
568 }
569 return VINF_SUCCESS;
570}
571
572
573/**
574 * Execute state load operation.
575 *
576 * @returns VBox status code.
577 * @param pVM The cross context VM structure.
578 * @param pSSM SSM operation handle.
579 * @param uVersion Data layout version.
580 * @param uPass The data pass.
581 */
582static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
583{
584 /*
585 * Validate version.
586 */
587 if ( uVersion > EM_SAVED_STATE_VERSION
588 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
589 {
590 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
591 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
592 }
593 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
594
595 /*
596 * Load the saved state.
597 */
598 for (VMCPUID i = 0; i < pVM->cCpus; i++)
599 {
600 PVMCPU pVCpu = &pVM->aCpus[i];
601
602 int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
603 if (RT_FAILURE(rc))
604 pVCpu->em.s.fForceRAW = false;
605 AssertRCReturn(rc, rc);
606
607 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
608 {
609 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
610 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
611 AssertRCReturn(rc, rc);
612 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
613
614 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
615 }
616 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
617 {
618 /* Load mwait state. */
619 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
620 AssertRCReturn(rc, rc);
621 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
622 AssertRCReturn(rc, rc);
623 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
624 AssertRCReturn(rc, rc);
625 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
626 AssertRCReturn(rc, rc);
627 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
628 AssertRCReturn(rc, rc);
629 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
630 AssertRCReturn(rc, rc);
631 }
632
633 Assert(!pVCpu->em.s.pCliStatTree);
634 }
635 return VINF_SUCCESS;
636}
637
638
639/**
640 * Argument packet for emR3SetExecutionPolicy.
641 */
642struct EMR3SETEXECPOLICYARGS
643{
644 EMEXECPOLICY enmPolicy;
645 bool fEnforce;
646};
647
648
649/**
650 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
651 */
652static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
653{
654 /*
655 * Only the first CPU changes the variables.
656 */
657 if (pVCpu->idCpu == 0)
658 {
659 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
660 switch (pArgs->enmPolicy)
661 {
662 case EMEXECPOLICY_RECOMPILE_RING0:
663 pVM->fRecompileSupervisor = pArgs->fEnforce;
664 break;
665 case EMEXECPOLICY_RECOMPILE_RING3:
666 pVM->fRecompileUser = pArgs->fEnforce;
667 break;
668 case EMEXECPOLICY_IEM_ALL:
669 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
670 break;
671 default:
672 AssertFailedReturn(VERR_INVALID_PARAMETER);
673 }
674 LogRel(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
675 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
676 }
677
678 /*
679 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
680 */
681 return pVCpu->em.s.enmState == EMSTATE_RAW
682 || pVCpu->em.s.enmState == EMSTATE_HM
683 || pVCpu->em.s.enmState == EMSTATE_NEM
684 || pVCpu->em.s.enmState == EMSTATE_IEM
685 || pVCpu->em.s.enmState == EMSTATE_REM
686 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
687 ? VINF_EM_RESCHEDULE
688 : VINF_SUCCESS;
689}
690
691
692/**
693 * Changes an execution scheduling policy parameter.
694 *
695 * This is used to enable or disable raw-mode / hardware-virtualization
696 * execution of user and supervisor code.
697 *
698 * @returns VINF_SUCCESS on success.
699 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
700 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
701 *
702 * @param pUVM The user mode VM handle.
703 * @param enmPolicy The scheduling policy to change.
704 * @param fEnforce Whether to enforce the policy or not.
705 */
706VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
707{
708 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
709 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
710 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
711
712 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
713 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
714}
715
716
717/**
718 * Queries an execution scheduling policy parameter.
719 *
720 * @returns VBox status code
721 * @param pUVM The user mode VM handle.
722 * @param enmPolicy The scheduling policy to query.
723 * @param pfEnforced Where to return the current value.
724 */
725VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
726{
727 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
728 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
729 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
730 PVM pVM = pUVM->pVM;
731 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
732
733 /* No need to bother EMTs with a query. */
734 switch (enmPolicy)
735 {
736 case EMEXECPOLICY_RECOMPILE_RING0:
737 *pfEnforced = pVM->fRecompileSupervisor;
738 break;
739 case EMEXECPOLICY_RECOMPILE_RING3:
740 *pfEnforced = pVM->fRecompileUser;
741 break;
742 case EMEXECPOLICY_IEM_ALL:
743 *pfEnforced = pVM->em.s.fIemExecutesAll;
744 break;
745 default:
746 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
747 }
748
749 return VINF_SUCCESS;
750}
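
/* Editor's note: a minimal usage sketch for the two policy APIs above,
 * assuming a valid user-mode VM handle (pUVM) obtained elsewhere; the
 * demo function itself is hypothetical, not part of the VMM API.
 */
#if 0 /* illustrative only */
static int emDemoForceIemAll(PUVM pUVM)
{
    /* Route all guest execution through IEM... */
    int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true /*fEnforce*/);
    if (RT_SUCCESS(rc))
    {
        /* ...and read the effective setting back. */
        bool fEnforced = false;
        rc = EMR3QueryExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, &fEnforced);
        Assert(fEnforced);
    }
    return rc;
}
#endif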
751
752
753/**
754 * Raise a fatal error.
755 *
756 * Safely terminate the VM with full state report and stuff. This function
757 * will naturally never return.
758 *
759 * @param pVCpu The cross context virtual CPU structure.
760 * @param rc VBox status code.
761 */
762VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
763{
764 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
765 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
766}
767
768
769#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
770/**
771 * Gets the EM state name.
772 *
773 * @returns Pointer to a read-only state name.
774 * @param enmState The state.
775 */
776static const char *emR3GetStateName(EMSTATE enmState)
777{
778 switch (enmState)
779 {
780 case EMSTATE_NONE: return "EMSTATE_NONE";
781 case EMSTATE_RAW: return "EMSTATE_RAW";
782 case EMSTATE_HM: return "EMSTATE_HM";
783 case EMSTATE_IEM: return "EMSTATE_IEM";
784 case EMSTATE_REM: return "EMSTATE_REM";
785 case EMSTATE_HALTED: return "EMSTATE_HALTED";
786 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
787 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
788 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
789 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
790 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
791 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
792 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
793 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
794 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
795 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
796 case EMSTATE_NEM: return "EMSTATE_NEM";
797 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
798 default: return "Unknown!";
799 }
800}
801#endif /* LOG_ENABLED || VBOX_STRICT */
802
803
804/**
805 * Debug loop.
806 *
807 * @returns VBox status code for EM.
808 * @param pVM The cross context VM structure.
809 * @param pVCpu The cross context virtual CPU structure.
810 * @param rc Current EM VBox status code.
811 */
812static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
813{
814 for (;;)
815 {
816 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
817 const VBOXSTRICTRC rcLast = rc;
818
819 /*
820 * Debug related RC.
821 */
822 switch (VBOXSTRICTRC_VAL(rc))
823 {
824 /*
825 * Single step an instruction.
826 */
827 case VINF_EM_DBG_STEP:
828 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
829 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
830 || pVCpu->em.s.fForceRAW /* paranoia */)
831#ifdef VBOX_WITH_RAW_MODE
832 rc = emR3RawStep(pVM, pVCpu);
833#else
834 AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
835#endif
836 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
837 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
838 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
839 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
840#ifdef VBOX_WITH_REM
841 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
842 rc = emR3RemStep(pVM, pVCpu);
843#endif
844 else
845 {
846 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
847 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
848 rc = VINF_EM_DBG_STEPPED;
849 }
850 break;
851
852 /*
853 * Simple events: stepped, breakpoint, stop/assertion.
854 */
855 case VINF_EM_DBG_STEPPED:
856 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
857 break;
858
859 case VINF_EM_DBG_BREAKPOINT:
860 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
861 break;
862
863 case VINF_EM_DBG_STOP:
864 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
865 break;
866
867 case VINF_EM_DBG_EVENT:
868 rc = DBGFR3EventHandlePending(pVM, pVCpu);
869 break;
870
871 case VINF_EM_DBG_HYPER_STEPPED:
872 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
873 break;
874
875 case VINF_EM_DBG_HYPER_BREAKPOINT:
876 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
877 break;
878
879 case VINF_EM_DBG_HYPER_ASSERTION:
880 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
881 RTLogFlush(NULL);
882 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
883 break;
884
885 /*
886 * Guru meditation.
887 */
888 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
889 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
890 break;
891 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
892 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
893 break;
894 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
895 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
896 break;
897
898 default: /** @todo don't use default for guru, but make special errors code! */
899 {
900 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
901 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
902 break;
903 }
904 }
905
906 /*
907 * Process the result.
908 */
909 switch (VBOXSTRICTRC_VAL(rc))
910 {
911 /*
912 * Continue the debugging loop.
913 */
914 case VINF_EM_DBG_STEP:
915 case VINF_EM_DBG_STOP:
916 case VINF_EM_DBG_EVENT:
917 case VINF_EM_DBG_STEPPED:
918 case VINF_EM_DBG_BREAKPOINT:
919 case VINF_EM_DBG_HYPER_STEPPED:
920 case VINF_EM_DBG_HYPER_BREAKPOINT:
921 case VINF_EM_DBG_HYPER_ASSERTION:
922 break;
923
924 /*
925 * Resuming execution (in some form) has to be done here if we got
926 * a hypervisor debug event.
927 */
928 case VINF_SUCCESS:
929 case VINF_EM_RESUME:
930 case VINF_EM_SUSPEND:
931 case VINF_EM_RESCHEDULE:
932 case VINF_EM_RESCHEDULE_RAW:
933 case VINF_EM_RESCHEDULE_REM:
934 case VINF_EM_HALT:
935 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
936 {
937#ifdef VBOX_WITH_RAW_MODE
938 rc = emR3RawResumeHyper(pVM, pVCpu);
939 if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
940 continue;
941#else
942 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
943#endif
944 }
945 if (rc == VINF_SUCCESS)
946 rc = VINF_EM_RESCHEDULE;
947 return rc;
948
949 /*
950 * The debugger isn't attached.
951 * We'll simply turn the thing off since that's the easiest thing to do.
952 */
953 case VERR_DBGF_NOT_ATTACHED:
954 switch (VBOXSTRICTRC_VAL(rcLast))
955 {
956 case VINF_EM_DBG_HYPER_STEPPED:
957 case VINF_EM_DBG_HYPER_BREAKPOINT:
958 case VINF_EM_DBG_HYPER_ASSERTION:
959 case VERR_TRPM_PANIC:
960 case VERR_TRPM_DONT_PANIC:
961 case VERR_VMM_RING0_ASSERTION:
962 case VERR_VMM_HYPER_CR3_MISMATCH:
963 case VERR_VMM_RING3_CALL_DISABLED:
964 return rcLast;
965 }
966 return VINF_EM_OFF;
967
968 /*
969 * Status codes terminating the VM in one or another sense.
970 */
971 case VINF_EM_TERMINATE:
972 case VINF_EM_OFF:
973 case VINF_EM_RESET:
974 case VINF_EM_NO_MEMORY:
975 case VINF_EM_RAW_STALE_SELECTOR:
976 case VINF_EM_RAW_IRET_TRAP:
977 case VERR_TRPM_PANIC:
978 case VERR_TRPM_DONT_PANIC:
979 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
980 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
981 case VERR_VMM_RING0_ASSERTION:
982 case VERR_VMM_HYPER_CR3_MISMATCH:
983 case VERR_VMM_RING3_CALL_DISABLED:
984 case VERR_INTERNAL_ERROR:
985 case VERR_INTERNAL_ERROR_2:
986 case VERR_INTERNAL_ERROR_3:
987 case VERR_INTERNAL_ERROR_4:
988 case VERR_INTERNAL_ERROR_5:
989 case VERR_IPE_UNEXPECTED_STATUS:
990 case VERR_IPE_UNEXPECTED_INFO_STATUS:
991 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
992 return rc;
993
994 /*
995 * The rest is unexpected, and will keep us here.
996 */
997 default:
998 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
999 break;
1000 }
1001 } /* debug for ever */
1002}
1003
1004
1005#if defined(VBOX_WITH_REM) || defined(DEBUG)
1006/**
1007 * Steps recompiled code.
1008 *
1009 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1010 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1011 *
1012 * @param pVM The cross context VM structure.
1013 * @param pVCpu The cross context virtual CPU structure.
1014 */
1015static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1016{
1017 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1018
1019# ifdef VBOX_WITH_REM
1020 EMRemLock(pVM);
1021
1022 /*
1023 * Switch to REM, step instruction, switch back.
1024 */
1025 int rc = REMR3State(pVM, pVCpu);
1026 if (RT_SUCCESS(rc))
1027 {
1028 rc = REMR3Step(pVM, pVCpu);
1029 REMR3StateBack(pVM, pVCpu);
1030 }
1031 EMRemUnlock(pVM);
1032
1033# else
1034 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1035# endif
1036
1037 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1038 return rc;
1039}
1040#endif /* VBOX_WITH_REM || DEBUG */
1041
1042
1043#ifdef VBOX_WITH_REM
1044/**
1045 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
1046 * critical section.
1047 *
1048 * @returns false - new fInREMState value.
1049 * @param pVM The cross context VM structure.
1050 * @param pVCpu The cross context virtual CPU structure.
1051 */
1052DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1053{
1054 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1055 REMR3StateBack(pVM, pVCpu);
1056 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1057
1058 EMRemUnlock(pVM);
1059 return false;
1060}
1061#endif
1062
1063
1064/**
1065 * Executes recompiled code.
1066 *
1067 * This function contains the recompiler version of the inner
1068 * execution loop (the outer loop being in EMR3ExecuteVM()).
1069 *
1070 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1071 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1072 *
1073 * @param pVM The cross context VM structure.
1074 * @param pVCpu The cross context virtual CPU structure.
1075 * @param pfFFDone Where to store an indicator telling whether or not
1076 * FFs were done before returning.
1077 *
1078 */
1079static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1080{
1081#ifdef LOG_ENABLED
1082 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1083 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1084
1085 if (pCtx->eflags.Bits.u1VM)
1086 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
1087 else
1088 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
1089#endif
1090 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1091
1092#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1093 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1094 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1095 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1096#endif
1097
1098 /*
1099 * Spin till a forced action returns anything but VINF_SUCCESS, or till
1100 * REM suggests raw-mode execution.
1101 */
1102 *pfFFDone = false;
1103#ifdef VBOX_WITH_REM
1104 bool fInREMState = false;
1105#else
1106 uint32_t cLoops = 0;
1107#endif
1108 int rc = VINF_SUCCESS;
1109 for (;;)
1110 {
1111#ifdef VBOX_WITH_REM
1112 /*
1113 * Lock REM and update the state if not already in sync.
1114 *
1115 * Note! Big lock, but you are not supposed to own any lock when
1116 * coming in here.
1117 */
1118 if (!fInREMState)
1119 {
1120 EMRemLock(pVM);
1121 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1122
1123 /* Flush the recompiler translation blocks if the VCPU has changed,
1124 also force a full CPU state resync. */
1125 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1126 {
1127 REMFlushTBs(pVM);
1128 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1129 }
1130 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1131
1132 rc = REMR3State(pVM, pVCpu);
1133
1134 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1135 if (RT_FAILURE(rc))
1136 break;
1137 fInREMState = true;
1138
1139 /*
1140 * We might have missed the raising of VMREQ, TIMER and some other
1141 * important FFs while we were busy switching the state. So, check again.
1142 */
1143 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1144 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1145 {
1146 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1147 goto l_REMDoForcedActions;
1148 }
1149 }
1150#endif
1151
1152 /*
1153 * Execute REM.
1154 */
1155 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1156 {
1157 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1158#ifdef VBOX_WITH_REM
1159 rc = REMR3Run(pVM, pVCpu);
1160#else
1161 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
1162#endif
1163 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1164 }
1165 else
1166 {
1167 /* Give up this time slice; virtual time continues */
1168 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1169 RTThreadSleep(5);
1170 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1171 rc = VINF_SUCCESS;
1172 }
1173
1174 /*
1175 * Deal with high priority post execution FFs before doing anything
1176 * else. Sync back the state and leave the lock to be on the safe side.
1177 */
1178 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1179 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1180 {
1181#ifdef VBOX_WITH_REM
1182 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1183#endif
1184 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1185 }
1186
1187 /*
1188 * Process the returned status code.
1189 */
1190 if (rc != VINF_SUCCESS)
1191 {
1192 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1193 break;
1194 if (rc != VINF_REM_INTERRUPED_FF)
1195 {
1196#ifndef VBOX_WITH_REM
1197 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1198 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1199 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1200 {
1201 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1202 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1203 {
1204 rc = VINF_EM_RESCHEDULE;
1205 break;
1206 }
1207 }
1208#endif
1209
1210 /*
1211 * Anything which is not known to us means an internal error
1212 * and the termination of the VM!
1213 */
1214 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1215 break;
1216 }
1217 }
1218
1219
1220 /*
1221 * Check and execute forced actions.
1222 *
1223 * Sync back the VM state and leave the lock before calling any of
1224 * these; you never know what's going to happen here.
1225 */
1226#ifdef VBOX_HIGH_RES_TIMERS_HACK
1227 TMTimerPollVoid(pVM, pVCpu);
1228#endif
1229 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1230 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1231 || VMCPU_FF_IS_PENDING(pVCpu,
1232 VMCPU_FF_ALL_REM_MASK
1233 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1234 {
1235#ifdef VBOX_WITH_REM
1236l_REMDoForcedActions:
1237 if (fInREMState)
1238 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1239#endif
1240 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1241 rc = emR3ForcedActions(pVM, pVCpu, rc);
1242 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1243 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1244 if ( rc != VINF_SUCCESS
1245 && rc != VINF_EM_RESCHEDULE_REM)
1246 {
1247 *pfFFDone = true;
1248 break;
1249 }
1250 }
1251
1252#ifndef VBOX_WITH_REM
1253 /*
1254 * Have to check if we can get back to fast execution mode every so often.
1255 */
1256 if (!(++cLoops & 7))
1257 {
1258 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1259 if ( enmCheck != EMSTATE_REM
1260 && enmCheck != EMSTATE_IEM_THEN_REM)
1261 return VINF_EM_RESCHEDULE;
1262 }
1263#endif
1264
1265 } /* The Inner Loop, recompiled execution mode version. */
1266
1267
1268#ifdef VBOX_WITH_REM
1269 /*
1270 * Returning. Sync back the VM state if required.
1271 */
1272 if (fInREMState)
1273 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1274#endif
1275
1276 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1277 return rc;
1278}
1279
1280
1281#ifdef DEBUG
1282
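/**
 * Steps recompiled code cIterations times, logging each instruction (debug
 * builds only; doc comment sketched in here since the function lacked one).
 *
 * @returns VINF_EM_RESCHEDULE.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cIterations The maximum number of instructions to step.
 */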
1283int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1284{
1285 EMSTATE enmOldState = pVCpu->em.s.enmState;
1286
1287 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1288
1289 Log(("Single step BEGIN:\n"));
1290 for (uint32_t i = 0; i < cIterations; i++)
1291 {
1292 DBGFR3PrgStep(pVCpu);
1293 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1294 emR3RemStep(pVM, pVCpu);
1295 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1296 break;
1297 }
1298 Log(("Single step END:\n"));
1299 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1300 pVCpu->em.s.enmState = enmOldState;
1301 return VINF_EM_RESCHEDULE;
1302}
1303
1304#endif /* DEBUG */
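
/*
 * Usage sketch for emR3SingleStepExecRem, assuming a hypothetical caller and
 * iteration count; the return value is always VINF_EM_RESCHEDULE, so the
 * outer loop in EMR3ExecuteVM re-evaluates the execution state afterwards:
 *
 *      int rc = emR3SingleStepExecRem(pVM, pVCpu, 16 /* hypothetical */);
 *      Assert(rc == VINF_EM_RESCHEDULE);
 */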
1305
1306
1307/**
1308 * Try to execute the problematic code in IEM first, then fall back on REM if there
1309 * is too much of it or if IEM doesn't implement something.
1310 *
1311 * @returns Strict VBox status code from IEMExecLots.
1312 * @param pVM The cross context VM structure.
1313 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1314 * @param pfFFDone Force flags done indicator.
1315 *
1316 * @thread EMT(pVCpu)
1317 */
1318static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1319{
1320 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1321 *pfFFDone = false;
1322
1323 /*
1324 * Execute in IEM for a while.
1325 */
1326 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1327 {
1328 uint32_t cInstructions;
1329 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
1330 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1331 if (rcStrict != VINF_SUCCESS)
1332 {
1333 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1334 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1335 break;
1336
1337 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1338 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1339 return rcStrict;
1340 }
1341
1342 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1343 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1344 {
1345 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1346 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1347 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1348 pVCpu->em.s.enmState = enmNewState;
1349 return VINF_SUCCESS;
1350 }
1351
1352 /*
1353 * Check for pending actions.
1354 */
1355 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1356 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1357 return VINF_SUCCESS;
1358 }
1359
1360 /*
1361 * Switch to REM.
1362 */
1363 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1364 pVCpu->em.s.enmState = EMSTATE_REM;
1365 return VINF_SUCCESS;
1366}
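
/*
 * Note on emR3ExecuteIemThenRem: the IEM budget is 1024 instructions per
 * visit (pVCpu->em.s.cIemThenRemInstructions); while under budget, every
 * IEMExecLots() round is followed by an emR3Reschedule() check, so a switch
 * back to HM/RAW happens as soon as the guest state allows. Only when the
 * budget runs out, or IEM reports VERR_IEM_INSTR_NOT_IMPLEMENTED or
 * VERR_IEM_ASPECT_NOT_IMPLEMENTED, does the state fall back to EMSTATE_REM.
 */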
1367
1368
1369/**
1370 * Decides whether to execute RAW, HM or REM.
1371 *
1372 * @returns new EM state
1373 * @param pVM The cross context VM structure.
1374 * @param pVCpu The cross context virtual CPU structure.
1375 * @param pCtx Pointer to the guest CPU context.
1376 */
1377EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1378{
1379 /*
1380 * When forcing raw-mode execution, things are simple.
1381 */
1382 if (pVCpu->em.s.fForceRAW)
1383 return EMSTATE_RAW;
1384
1385 /*
1386 * We stay in the wait for SIPI state unless explicitly told otherwise.
1387 */
1388 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1389 return EMSTATE_WAIT_SIPI;
1390
1391 /*
1392 * Execute everything in IEM?
1393 */
1394 if (pVM->em.s.fIemExecutesAll)
1395 return EMSTATE_IEM;
1396
1397 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1398 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1399 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1400
1401 X86EFLAGS EFlags = pCtx->eflags;
1402 /** @todo NEM: scheduling. */
1403 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1404 {
1405 /*
1406 * Hardware accelerated raw-mode:
1407 */
1408 if ( EMIsHwVirtExecutionEnabled(pVM)
1409 && HMR3CanExecuteGuest(pVM, pCtx))
1410 return EMSTATE_HM;
1411
1412 /*
1413 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1414 * turns off monitoring features essential for raw mode!
1415 */
1416 return EMSTATE_IEM_THEN_REM;
1417 }
1418
1419 /*
1420 * Standard raw-mode:
1421 *
1422 * Here we only support 16 and 32-bit protected mode ring-3 code with no IO
1423 * privileges, and 32-bit protected mode ring-0 code.
1424 *
1425 * The tests are ordered by the likelihood of being true during normal execution.
1426 */
1427 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1428 {
1429 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1430 return EMSTATE_REM;
1431 }
1432
1433# ifndef VBOX_RAW_V86
1434 if (EFlags.u32 & X86_EFL_VM) {
1435 Log2(("raw mode refused: VM_MASK\n"));
1436 return EMSTATE_REM;
1437 }
1438# endif
1439
1440 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1441 uint32_t u32CR0 = pCtx->cr0;
1442 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1443 {
1444 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1445 return EMSTATE_REM;
1446 }
1447
1448 if (pCtx->cr4 & X86_CR4_PAE)
1449 {
1450 uint32_t u32Dummy, u32Features;
1451
1452 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1453 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1454 return EMSTATE_REM;
1455 }
1456
1457 unsigned uSS = pCtx->ss.Sel;
1458 if ( pCtx->eflags.Bits.u1VM
1459 || (uSS & X86_SEL_RPL) == 3)
1460 {
1461 if (!EMIsRawRing3Enabled(pVM))
1462 return EMSTATE_REM;
1463
1464 if (!(EFlags.u32 & X86_EFL_IF))
1465 {
1466 Log2(("raw mode refused: IF (RawR3)\n"));
1467 return EMSTATE_REM;
1468 }
1469
1470 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1471 {
1472 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1473 return EMSTATE_REM;
1474 }
1475 }
1476 else
1477 {
1478 if (!EMIsRawRing0Enabled(pVM))
1479 return EMSTATE_REM;
1480
1481 if (EMIsRawRing1Enabled(pVM))
1482 {
1483 /* Only ring 0 and 1 supervisor code. */
1484 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1485 {
1486 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1487 return EMSTATE_REM;
1488 }
1489 }
1490 /* Only ring 0 supervisor code. */
1491 else if ((uSS & X86_SEL_RPL) != 0)
1492 {
1493 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1494 return EMSTATE_REM;
1495 }
1496
1497 // Let's start with pure 32 bits ring 0 code first
1498 /** @todo What's pure 32-bit mode? flat? */
1499 if ( !(pCtx->ss.Attr.n.u1DefBig)
1500 || !(pCtx->cs.Attr.n.u1DefBig))
1501 {
1502 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1503 return EMSTATE_REM;
1504 }
1505
1506 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1507 if (!(u32CR0 & X86_CR0_WP))
1508 {
1509 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1510 return EMSTATE_REM;
1511 }
1512
1513# ifdef VBOX_WITH_RAW_MODE
1514 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1515 {
1516 Log2(("raw r0 mode forced: patch code\n"));
1517# ifdef VBOX_WITH_SAFE_STR
1518 Assert(pCtx->tr.Sel);
1519# endif
1520 return EMSTATE_RAW;
1521 }
1522# endif /* VBOX_WITH_RAW_MODE */
1523
1524# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1525 if (!(EFlags.u32 & X86_EFL_IF))
1526 {
1527 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1528 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1529 return EMSTATE_REM;
1530 }
1531# endif
1532
1533# ifndef VBOX_WITH_RAW_RING1
1534 /** @todo still necessary??? */
1535 if (EFlags.Bits.u2IOPL != 0)
1536 {
1537 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1538 return EMSTATE_REM;
1539 }
1540# endif
1541 }
1542
1543 /*
1544 * Stale hidden selectors mean raw-mode is unsafe (being very careful).
1545 */
1546 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1547 {
1548 Log2(("raw mode refused: stale CS\n"));
1549 return EMSTATE_REM;
1550 }
1551 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1552 {
1553 Log2(("raw mode refused: stale SS\n"));
1554 return EMSTATE_REM;
1555 }
1556 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1557 {
1558 Log2(("raw mode refused: stale DS\n"));
1559 return EMSTATE_REM;
1560 }
1561 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1562 {
1563 Log2(("raw mode refused: stale ES\n"));
1564 return EMSTATE_REM;
1565 }
1566 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1567 {
1568 Log2(("raw mode refused: stale FS\n"));
1569 return EMSTATE_REM;
1570 }
1571 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1572 {
1573 Log2(("raw mode refused: stale GS\n"));
1574 return EMSTATE_REM;
1575 }
1576
1577# ifdef VBOX_WITH_SAFE_STR
1578 if (pCtx->tr.Sel == 0)
1579 {
1580 Log(("Raw mode refused -> TR=0\n"));
1581 return EMSTATE_REM;
1582 }
1583# endif
1584
1585 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1586 return EMSTATE_RAW;
1587}
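
/*
 * Decision summary for emR3Reschedule (mirrors the checks above):
 *
 *      fForceRAW                   -> EMSTATE_RAW
 *      currently EMSTATE_WAIT_SIPI -> EMSTATE_WAIT_SIPI
 *      fIemExecutesAll             -> EMSTATE_IEM
 *      !VM_IS_RAW_MODE_ENABLED     -> EMSTATE_HM if EMIsHwVirtExecutionEnabled()
 *                                     and HMR3CanExecuteGuest() say yes,
 *                                     else EMSTATE_IEM_THEN_REM
 *      otherwise                   -> EMSTATE_RAW only if every raw-mode
 *                                     suitability check passes; any failure
 *                                     yields EMSTATE_REM.
 */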
1588
1589
1590/**
1591 * Executes all high priority post-execution forced actions.
1592 *
1593 * @returns rc or a fatal status code.
1594 *
1595 * @param pVM The cross context VM structure.
1596 * @param pVCpu The cross context virtual CPU structure.
1597 * @param rc The current rc.
1598 */
1599int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1600{
1601 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1602
1603 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1604 PDMCritSectBothFF(pVCpu);
1605
1606 /* Update CR3 (Nested Paging case for HM). */
1607 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1608 {
1609 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1610 if (RT_FAILURE(rc2))
1611 return rc2;
1612 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1613 }
1614
1615 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1616 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1617 {
1618 if (CPUMIsGuestInPAEMode(pVCpu))
1619 {
1620 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1621 AssertPtr(pPdpes);
1622
1623 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1624 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1625 }
1626 else
1627 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1628 }
1629
1630 /* IEM has pending work (typically memory write after INS instruction). */
1631 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
1632 rc = VBOXSTRICTRC_TODO(IEMR3ProcessForceFlag(pVM, pVCpu, rc));
1633
1634 /* IOM has pending work (committing an I/O or MMIO write). */
1635 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
1636 rc = VBOXSTRICTRC_TODO(IOMR3ProcessForceFlag(pVM, pVCpu, rc));
1637
1638#ifdef VBOX_WITH_RAW_MODE
1639 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1640 CSAMR3DoPendingAction(pVM, pVCpu);
1641#endif
1642
1643 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1644 {
1645 if ( rc > VINF_EM_NO_MEMORY
1646 && rc <= VINF_EM_LAST)
1647 rc = VINF_EM_NO_MEMORY;
1648 }
1649
1650 return rc;
1651}
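
/*
 * Call-site sketch for emR3HighPriorityPostForcedActions: the inner loops
 * invoke it right after guest execution whenever a high priority post
 * execution force flag is pending, e.g. (from emR3RemExecute above):
 *
 *      if (    VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
 *          ||  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
 *          rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
 */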
1652
1653#ifdef VBOX_WITH_NESTED_HWVIRT
1654/**
1655 * Helper for emR3ForcedActions() for injecting interrupts into the
1656 * nested-guest.
1657 *
1658 * @returns VBox status code.
1659 * @param pVCpu The cross context virtual CPU structure.
1660 * @param pCtx Pointer to the nested-guest CPU context.
1661 * @param pfResched Where to store whether a reschedule is required.
1662 * @param pfInject Where to store whether an interrupt was injected (and if
1663 * a wake up is pending).
1664 */
1665static int emR3NstGstInjectIntr(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfResched, bool *pfInject)
1666{
1667 *pfResched = false;
1668 *pfInject = false;
1669 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1670 {
1671 PVM pVM = pVCpu->CTX_SUFF(pVM);
1672 bool fGif = pCtx->hwvirt.fGif;
1673#ifdef VBOX_WITH_RAW_MODE
1674 fGif &= !PATMIsPatchGCAddr(pVM, pCtx->eip);
1675#endif
1676 if (fGif)
1677 {
1678 if (CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
1679 {
1680 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1681 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1682 {
1683 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
1684 {
1685 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1686 if (RT_SUCCESS(rcStrict))
1687 {
1688 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1689 * doesn't intercept HLT but intercepts INTR? */
1690 *pfResched = true;
1691 return VINF_EM_RESCHEDULE;
1692 }
1693
1694 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1695 return VINF_EM_TRIPLE_FAULT;
1696 }
1697
1698 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1699 /** @todo this really isn't nice, should properly handle this */
1700 int rc = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1701 if (pVM->em.s.fIemExecutesAll && ( rc == VINF_EM_RESCHEDULE_REM
1702 || rc == VINF_EM_RESCHEDULE_HM
1703 || rc == VINF_EM_RESCHEDULE_RAW))
1704 {
1705 rc = VINF_EM_RESCHEDULE;
1706 }
1707
1708 *pfResched = true;
1709 *pfInject = true;
1710 return rc;
1711 }
1712 }
1713
1714 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1715 && CPUMCanSvmNstGstTakeVirtIntr(pVCpu, pCtx))
1716 {
1717 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
1718 {
1719 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1720 if (RT_SUCCESS(rcStrict))
1721 {
1722 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1723 * doesn't intercept HLT but intercepts VINTR? */
1724 *pfResched = true;
1725 return VINF_EM_RESCHEDULE;
1726 }
1727
1728 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1729 return VINF_EM_TRIPLE_FAULT;
1730 }
1731
1732 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1733 uint8_t const uNstGstVector = CPUMGetSvmNstGstInterrupt(pCtx);
1734 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR vector %#x\n", uNstGstVector));
1735 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1736 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1737
1738 *pfResched = true;
1739 *pfInject = true;
1740 return VINF_EM_RESCHEDULE;
1741 }
1742 }
1743 return VINF_SUCCESS;
1744 }
1745
1746 if (CPUMIsGuestInVmxNestedHwVirtMode(pCtx))
1747 { /** @todo Nested VMX. */ }
1748
1749 /* Shouldn't really get here. */
1750 AssertMsgFailed(("Unrecognized nested hwvirt. arch!\n"));
1751 return VERR_EM_INTERNAL_ERROR;
1752}
1753#endif
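
/*
 * Flow summary for emR3NstGstInjectIntr (SVM, mirrors the code above): with
 * GIF set, a pending physical interrupt either triggers an INTR #VMEXIT via
 * IEMExecSvmVmexit() when intercepted or is injected with TRPMR3InjectEvent();
 * a pending virtual interrupt either triggers a VINTR #VMEXIT or is asserted
 * with TRPMAssertTrap() using the vector from CPUMGetSvmNstGstInterrupt().
 * The VMX case is not implemented yet.
 */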
1754
1755/**
1756 * Executes all pending forced actions.
1757 *
1758 * Forced actions can cause execution delays and execution
1759 * rescheduling. The first we deal with using action priority, so
1760 * that for instance pending timers aren't scheduled and run until
1761 * right before execution. The rescheduling we deal with using
1762 * return codes. The same goes for VM termination, only in that case
1763 * we exit everything.
1764 *
1765 * @returns VBox status code of equal or greater importance/severity than rc.
1766 * The most important ones are: VINF_EM_RESCHEDULE,
1767 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1768 *
1769 * @param pVM The cross context VM structure.
1770 * @param pVCpu The cross context virtual CPU structure.
1771 * @param rc The current rc.
1772 *
1773 */
1774int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1775{
1776 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1777#ifdef VBOX_STRICT
1778 int rcIrq = VINF_SUCCESS;
1779#endif
1780 int rc2;
1781#define UPDATE_RC() \
1782 do { \
1783 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1784 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1785 break; \
1786 if (!rc || rc2 < rc) \
1787 rc = rc2; \
1788 } while (0)
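 /* Worked example: the VINF_EM_* codes are ordered so that a numerically
    smaller value is more important. With rc = VINF_EM_RESCHEDULE, a forced
    action returning rc2 = VINF_EM_SUSPEND (smaller) makes rc become
    VINF_EM_SUSPEND; rc2 = VINF_SUCCESS never overrides rc, and once rc is a
    negative (error) status it is kept as-is. */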
1789 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1790
1791 /*
1792 * Post execution chunk first.
1793 */
1794 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1795 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1796 {
1797 /*
1798 * EMT Rendezvous (must be serviced before termination).
1799 */
1800 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1801 {
1802 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1803 UPDATE_RC();
1804 /** @todo HACK ALERT! The following test is to make sure EM+TM
1805 * thinks the VM is stopped/reset before the next VM state change
1806 * is made. We need a better solution for this, or at least make it
1807 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1808 * VINF_EM_SUSPEND). */
1809 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1810 {
1811 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1812 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1813 return rc;
1814 }
1815 }
1816
1817 /*
1818 * State change request (cleared by vmR3SetStateLocked).
1819 */
1820 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1821 {
1822 VMSTATE enmState = VMR3GetState(pVM);
1823 switch (enmState)
1824 {
1825 case VMSTATE_FATAL_ERROR:
1826 case VMSTATE_FATAL_ERROR_LS:
1827 case VMSTATE_GURU_MEDITATION:
1828 case VMSTATE_GURU_MEDITATION_LS:
1829 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1830 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1831 return VINF_EM_SUSPEND;
1832
1833 case VMSTATE_DESTROYING:
1834 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1835 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1836 return VINF_EM_TERMINATE;
1837
1838 default:
1839 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1840 }
1841 }
1842
1843 /*
1844 * Debugger Facility polling.
1845 */
1846 if ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
1847 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
1848 {
1849 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1850 UPDATE_RC();
1851 }
1852
1853 /*
1854 * Postponed reset request.
1855 */
1856 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1857 {
1858 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1859 UPDATE_RC();
1860 }
1861
1862#ifdef VBOX_WITH_RAW_MODE
1863 /*
1864 * CSAM page scanning.
1865 */
1866 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1867 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1868 {
1869 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1870
1871 /** @todo check for 16 or 32 bits code! (D bit in the code selector) */
1872 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1873
1874 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
1875 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1876 }
1877#endif
1878
1879 /*
1880 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1881 */
1882 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1883 {
1884 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1885 UPDATE_RC();
1886 if (rc == VINF_EM_NO_MEMORY)
1887 return rc;
1888 }
1889
1890 /* check that we got them all */
1891 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1892 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
1893 }
1894
1895 /*
1896 * Normal priority then.
1897 * (Executed in no particular order.)
1898 */
1899 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1900 {
1901 /*
1902 * PDM Queues are pending.
1903 */
1904 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1905 PDMR3QueueFlushAll(pVM);
1906
1907 /*
1908 * PDM DMA transfers are pending.
1909 */
1910 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1911 PDMR3DmaRun(pVM);
1912
1913 /*
1914 * EMT Rendezvous (make sure they are handled before the requests).
1915 */
1916 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1917 {
1918 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1919 UPDATE_RC();
1920 /** @todo HACK ALERT! The following test is to make sure EM+TM
1921 * thinks the VM is stopped/reset before the next VM state change
1922 * is made. We need a better solution for this, or at least make it
1923 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1924 * VINF_EM_SUSPEND). */
1925 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1926 {
1927 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1928 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1929 return rc;
1930 }
1931 }
1932
1933 /*
1934 * Requests from other threads.
1935 */
1936 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1937 {
1938 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1939 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1940 {
1941 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1942 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1943 return rc2;
1944 }
1945 UPDATE_RC();
1946 /** @todo HACK ALERT! The following test is to make sure EM+TM
1947 * thinks the VM is stopped/reset before the next VM state change
1948 * is made. We need a better solution for this, or at least make it
1949 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1950 * VINF_EM_SUSPEND). */
1951 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1952 {
1953 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1954 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1955 return rc;
1956 }
1957 }
1958
1959#ifdef VBOX_WITH_REM
1960 /* Replay the handler notification changes. */
1961 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1962 {
1963 /* Try not to cause deadlocks. */
1964 if ( pVM->cCpus == 1
1965 || ( !PGMIsLockOwner(pVM)
1966 && !IOMIsLockWriteOwner(pVM))
1967 )
1968 {
1969 EMRemLock(pVM);
1970 REMR3ReplayHandlerNotifications(pVM);
1971 EMRemUnlock(pVM);
1972 }
1973 }
1974#endif
1975
1976 /* check that we got them all */
1977 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1978 }
1979
1980 /*
1981 * Normal priority then. (per-VCPU)
1982 * (Executed in no particular order.)
1983 */
1984 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1985 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1986 {
1987 /*
1988 * Requests from other threads.
1989 */
1990 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
1991 {
1992 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1993 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1994 {
1995 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1996 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1997 return rc2;
1998 }
1999 UPDATE_RC();
2000 /** @todo HACK ALERT! The following test is to make sure EM+TM
2001 * thinks the VM is stopped/reset before the next VM state change
2002 * is made. We need a better solution for this, or at least make it
2003 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2004 * VINF_EM_SUSPEND). */
2005 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2006 {
2007 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2008 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2009 return rc;
2010 }
2011 }
2012
2013 /* check that we got them all */
2014 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
2015 }
2016
2017 /*
2018 * High priority pre execution chunk last.
2019 * (Executed in ascending priority order.)
2020 */
2021 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
2022 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
2023 {
2024 /*
2025 * Timers before interrupts.
2026 */
2027 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
2028 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2029 TMR3TimerQueuesDo(pVM);
2030
2031 /*
2032 * Pick up asynchronously posted interrupts into the APIC.
2033 */
2034 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2035 APICUpdatePendingInterrupts(pVCpu);
2036
2037 /*
2038 * The instruction following an emulated STI should *always* be executed!
2039 *
2040 * Note! We intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if
2041 * the eip is the same as the inhibited instr address. Before we
2042 * are able to execute this instruction in raw mode (iret to
2043 * guest code) an external interrupt might force a world switch
2044 * again. Possibly allowing a guest interrupt to be dispatched
2045 * in the process. This could break the guest. Sounds very
2046 * unlikely, but such timing sensitive problems are not as rare as
2047 * you might think.
2048 */
2049 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2050 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2051 {
2052 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
2053 {
2054 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
2055 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2056 }
2057 else
2058 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
2059 }
2060
2061 /*
2062 * Interrupts.
2063 */
2064 bool fWakeupPending = false;
2065 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2066 && (!rc || rc >= VINF_EM_RESCHEDULE_HM))
2067 {
2068 if ( !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2069 && !TRPMHasTrap(pVCpu)) /* an interrupt could already be scheduled for dispatching in the recompiler. */
2070 {
2071 Assert(!HMR3IsEventPending(pVCpu));
2072 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2073#ifdef VBOX_WITH_NESTED_HWVIRT
2074 if (CPUMIsGuestInNestedHwVirtMode(pCtx))
2075 {
2076 bool fResched, fInject;
2077 rc2 = emR3NstGstInjectIntr(pVCpu, pCtx, &fResched, &fInject);
2078 if (fInject)
2079 {
2080 fWakeupPending = true;
2081#ifdef VBOX_STRICT
2082 rcIrq = rc2;
2083#endif
2084 }
2085 if (fResched)
2086 UPDATE_RC();
2087 }
2088 else
2089#endif
2090 {
2091 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2092#ifdef VBOX_WITH_NESTED_HWVIRT
2093 && pCtx->hwvirt.fGif
2094#endif
2095#ifdef VBOX_WITH_RAW_MODE
2096 && !PATMIsPatchGCAddr(pVM, pCtx->eip)
2097#endif
2098 && pCtx->eflags.Bits.u1IF)
2099 {
2100 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2101 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
2102 /** @todo this really isn't nice, should properly handle this */
2103 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
2104 if (pVM->em.s.fIemExecutesAll && ( rc2 == VINF_EM_RESCHEDULE_REM
2105 || rc2 == VINF_EM_RESCHEDULE_HM
2106 || rc2 == VINF_EM_RESCHEDULE_RAW))
2107 {
2108 rc2 = VINF_EM_RESCHEDULE;
2109 }
2110#ifdef VBOX_STRICT
2111 rcIrq = rc2;
2112#endif
2113 UPDATE_RC();
2114 /* Reschedule required: We must not miss the wakeup below! */
2115 fWakeupPending = true;
2116 }
2117 }
2118 }
2119 }
2120
2121 /*
2122 * Allocate handy pages.
2123 */
2124 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2125 {
2126 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2127 UPDATE_RC();
2128 }
2129
2130 /*
2131 * Debugger Facility request.
2132 */
2133 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2134 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2135 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
2136 {
2137 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2138 UPDATE_RC();
2139 }
2140
2141 /*
2142 * EMT Rendezvous (must be serviced before termination).
2143 */
2144 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2145 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2146 {
2147 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2148 UPDATE_RC();
2149 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2150 * stopped/reset before the next VM state change is made. We need a better
2151 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2152 * && rc >= VINF_EM_SUSPEND). */
2153 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2154 {
2155 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2156 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2157 return rc;
2158 }
2159 }
2160
2161 /*
2162 * State change request (cleared by vmR3SetStateLocked).
2163 */
2164 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2165 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2166 {
2167 VMSTATE enmState = VMR3GetState(pVM);
2168 switch (enmState)
2169 {
2170 case VMSTATE_FATAL_ERROR:
2171 case VMSTATE_FATAL_ERROR_LS:
2172 case VMSTATE_GURU_MEDITATION:
2173 case VMSTATE_GURU_MEDITATION_LS:
2174 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2175 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2176 return VINF_EM_SUSPEND;
2177
2178 case VMSTATE_DESTROYING:
2179 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2180 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2181 return VINF_EM_TERMINATE;
2182
2183 default:
2184 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2185 }
2186 }
2187
2188 /*
2189 * Out of memory? Since most of our fellow high priority actions may cause us
2190 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2191 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2192 * than us since we can terminate without allocating more memory.
2193 */
2194 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2195 {
2196 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2197 UPDATE_RC();
2198 if (rc == VINF_EM_NO_MEMORY)
2199 return rc;
2200 }
2201
2202 /*
2203 * If the virtual sync clock is still stopped, make TM restart it.
2204 */
2205 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2206 TMR3VirtualSyncFF(pVM, pVCpu);
2207
2208#ifdef DEBUG
2209 /*
2210 * Debug, pause the VM.
2211 */
2212 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2213 {
2214 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2215 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2216 return VINF_EM_SUSPEND;
2217 }
2218#endif
2219
2220 /* check that we got them all */
2221 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2222 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2223 }
2224
2225#undef UPDATE_RC
2226 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2227 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2228 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2229 return rc;
2230}
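
/*
 * Processing order recap for emR3ForcedActions (mirrors the code above):
 *      1. Normal priority post-execution: EMT rendezvous, VM state change,
 *         DBGF, postponed reset, CSAM page scan, out-of-memory.
 *      2. Normal priority, VM wide: PDM queues, PDM DMA, EMT rendezvous,
 *         cross-thread requests, REM handler notifications.
 *      3. Normal priority, per VCPU: cross-thread requests.
 *      4. High priority pre-execution: timers, APIC update, interrupt
 *         inhibition and injection, handy pages, DBGF, EMT rendezvous, VM
 *         state change, out-of-memory, virtual sync clock, and (debug
 *         builds) VM_FF_DEBUG_SUSPEND.
 */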
2231
2232
2233/**
2234 * Check if the preset execution time cap restricts guest execution scheduling.
2235 *
2236 * @returns true if allowed, false otherwise
2237 * @param pVM The cross context VM structure.
2238 * @param pVCpu The cross context virtual CPU structure.
2239 */
2240bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2241{
2242 uint64_t u64UserTime, u64KernelTime;
2243
2244 if ( pVM->uCpuExecutionCap != 100
2245 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2246 {
2247 uint64_t u64TimeNow = RTTimeMilliTS();
2248 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2249 {
2250 /* New time slice. */
2251 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2252 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2253 pVCpu->em.s.u64TimeSliceExec = 0;
2254 }
2255 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2256
2257 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2258 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2259 return false;
2260 }
2261 return true;
2262}
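
/*
 * Worked example for the execution cap (assuming EM_TIME_SLICE is 100 ms for
 * illustration): with uCpuExecutionCap = 50 the EMT may consume at most
 * (100 * 50) / 100 = 50 ms of combined kernel+user thread time per slice;
 * once u64TimeSliceExec reaches that budget, callers such as emR3RemExecute
 * stop executing guest code and RTThreadSleep() until a new slice begins.
 */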
2263
2264
2265/**
2266 * Execute VM.
2267 *
2268 * This function is the main loop of the VM. The emulation thread
2269 * calls this function when the VM has been successfully constructed
2270 * and we're ready for executing the VM.
2271 *
2272 * Returning from this function means that the VM is turned off or
2273 * suspended (state already saved) and deconstruction is next in line.
2274 *
2275 * All interaction from other threads is done using forced actions
2276 * and signaling of the wait object.
2277 *
2278 * @returns VBox status code; informational status codes may indicate failure.
2279 * @param pVM The cross context VM structure.
2280 * @param pVCpu The cross context virtual CPU structure.
2281 */
2282VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2283{
2284 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2285 pVM,
2286 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2287 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2288 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2289 pVCpu->em.s.fForceRAW));
2290 VM_ASSERT_EMT(pVM);
2291 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2292 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2293 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2294 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2295
2296 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2297 if (rc == 0)
2298 {
2299 /*
2300 * Start the virtual time.
2301 */
2302 TMR3NotifyResume(pVM, pVCpu);
2303
2304 /*
2305 * The Outer Main Loop.
2306 */
2307 bool fFFDone = false;
2308
2309 /* Reschedule right away to start in the right state. */
2310 rc = VINF_SUCCESS;
2311
2312 /* If resuming after a pause or a state load, restore the previous
2313 state or else we'll start executing code. Else, just reschedule. */
2314 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2315 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2316 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2317 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2318 else
2319 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2320 pVCpu->em.s.cIemThenRemInstructions = 0;
2321 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2322
2323 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2324 for (;;)
2325 {
2326 /*
2327 * Before we can schedule anything (we're here because
2328 * scheduling is required) we must service any pending
2329 * forced actions to avoid any pending action causing
2330 * immediate rescheduling upon entering an inner loop.
2331 *
2332 * Do forced actions.
2333 */
2334 if ( !fFFDone
2335 && RT_SUCCESS(rc)
2336 && rc != VINF_EM_TERMINATE
2337 && rc != VINF_EM_OFF
2338 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2339 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2340 {
2341 rc = emR3ForcedActions(pVM, pVCpu, rc);
2342 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2343 if ( ( rc == VINF_EM_RESCHEDULE_REM
2344 || rc == VINF_EM_RESCHEDULE_HM)
2345 && pVCpu->em.s.fForceRAW)
2346 rc = VINF_EM_RESCHEDULE_RAW;
2347 }
2348 else if (fFFDone)
2349 fFFDone = false;
2350
2351 /*
2352 * Now what to do?
2353 */
2354 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2355 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2356 switch (rc)
2357 {
2358 /*
2359 * Keep doing what we're currently doing.
2360 */
2361 case VINF_SUCCESS:
2362 break;
2363
2364 /*
2365 * Reschedule - to raw-mode execution.
2366 */
2367/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2368 case VINF_EM_RESCHEDULE_RAW:
2369 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2370 if (VM_IS_RAW_MODE_ENABLED(pVM))
2371 {
2372 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2373 pVCpu->em.s.enmState = EMSTATE_RAW;
2374 }
2375 else
2376 {
2377 AssertLogRelFailed();
2378 pVCpu->em.s.enmState = EMSTATE_NONE;
2379 }
2380 break;
2381
2382 /*
2383 * Reschedule - to HM or NEM.
2384 */
2385 case VINF_EM_RESCHEDULE_HM:
2386 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2387 Assert(!pVCpu->em.s.fForceRAW);
2388 if (VM_IS_HM_ENABLED(pVM))
2389 {
2390 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2391 pVCpu->em.s.enmState = EMSTATE_HM;
2392 }
2393 else if (VM_IS_NEM_ENABLED(pVM))
2394 {
2395 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2396 pVCpu->em.s.enmState = EMSTATE_NEM;
2397 }
2398 else
2399 {
2400 AssertLogRelFailed();
2401 pVCpu->em.s.enmState = EMSTATE_NONE;
2402 }
2403 break;
2404
2405 /*
2406 * Reschedule - to recompiled execution.
2407 */
2408 case VINF_EM_RESCHEDULE_REM:
2409 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2410 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2411 {
2412 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2413 enmOldState, EMSTATE_IEM_THEN_REM));
2414 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2415 {
2416 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2417 pVCpu->em.s.cIemThenRemInstructions = 0;
2418 }
2419 }
2420 else
2421 {
2422 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2423 pVCpu->em.s.enmState = EMSTATE_REM;
2424 }
2425 break;
2426
2427 /*
2428 * Resume.
2429 */
2430 case VINF_EM_RESUME:
2431 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2432 /* Don't reschedule in the halted or wait for SIPI case. */
2433 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2434 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2435 {
2436 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2437 break;
2438 }
2439 /* fall through and get scheduled. */
2440 RT_FALL_THRU();
2441
2442 /*
2443 * Reschedule.
2444 */
2445 case VINF_EM_RESCHEDULE:
2446 {
2447 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2448 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2449 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2450 pVCpu->em.s.cIemThenRemInstructions = 0;
2451 pVCpu->em.s.enmState = enmState;
2452 break;
2453 }
2454
2455 /*
2456 * Halted.
2457 */
2458 case VINF_EM_HALT:
2459 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2460 pVCpu->em.s.enmState = EMSTATE_HALTED;
2461 break;
2462
2463 /*
2464 * Switch to the wait for SIPI state (application processor only)
2465 */
2466 case VINF_EM_WAIT_SIPI:
2467 Assert(pVCpu->idCpu != 0);
2468 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2469 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2470 break;
2471
2472
2473 /*
2474 * Suspend.
2475 */
2476 case VINF_EM_SUSPEND:
2477 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2478 Assert(enmOldState != EMSTATE_SUSPENDED);
2479 pVCpu->em.s.enmPrevState = enmOldState;
2480 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2481 break;
2482
2483 /*
2484 * Reset.
2485 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2486 */
2487 case VINF_EM_RESET:
2488 {
2489 if (pVCpu->idCpu == 0)
2490 {
2491 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2492 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2493 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2494 pVCpu->em.s.cIemThenRemInstructions = 0;
2495 pVCpu->em.s.enmState = enmState;
2496 }
2497 else
2498 {
2499 /* All other VCPUs go into the wait for SIPI state. */
2500 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2501 }
2502 break;
2503 }
2504
2505 /*
2506 * Power Off.
2507 */
2508 case VINF_EM_OFF:
2509 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2510 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2511 TMR3NotifySuspend(pVM, pVCpu);
2512 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2513 return rc;
2514
2515 /*
2516 * Terminate the VM.
2517 */
2518 case VINF_EM_TERMINATE:
2519 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2520 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2521 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2522 TMR3NotifySuspend(pVM, pVCpu);
2523 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2524 return rc;
2525
2526
2527 /*
2528 * Out of memory, suspend the VM and stuff.
2529 */
2530 case VINF_EM_NO_MEMORY:
2531 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2532 Assert(enmOldState != EMSTATE_SUSPENDED);
2533 pVCpu->em.s.enmPrevState = enmOldState;
2534 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2535 TMR3NotifySuspend(pVM, pVCpu);
2536 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2537
2538 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2539 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2540 if (rc != VINF_EM_SUSPEND)
2541 {
2542 if (RT_SUCCESS_NP(rc))
2543 {
2544 AssertLogRelMsgFailed(("%Rrc\n", rc));
2545 rc = VERR_EM_INTERNAL_ERROR;
2546 }
2547 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2548 }
2549 return rc;
2550
2551 /*
2552 * Guest debug events.
2553 */
2554 case VINF_EM_DBG_STEPPED:
2555 case VINF_EM_DBG_STOP:
2556 case VINF_EM_DBG_EVENT:
2557 case VINF_EM_DBG_BREAKPOINT:
2558 case VINF_EM_DBG_STEP:
2559 if (enmOldState == EMSTATE_RAW)
2560 {
2561 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2562 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2563 }
2564 else if (enmOldState == EMSTATE_HM)
2565 {
2566 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2567 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2568 }
2569 else if (enmOldState == EMSTATE_NEM)
2570 {
2571 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2572 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2573 }
2574 else if (enmOldState == EMSTATE_REM)
2575 {
2576 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2577 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2578 }
2579 else
2580 {
2581 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2582 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2583 }
2584 break;
2585
2586 /*
2587 * Hypervisor debug events.
2588 */
2589 case VINF_EM_DBG_HYPER_STEPPED:
2590 case VINF_EM_DBG_HYPER_BREAKPOINT:
2591 case VINF_EM_DBG_HYPER_ASSERTION:
2592 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2593 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2594 break;
2595
2596 /*
2597 * Triple fault.
2598 */
2599 case VINF_EM_TRIPLE_FAULT:
2600 if (!pVM->em.s.fGuruOnTripleFault)
2601 {
2602 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2603 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2604 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2605 continue;
2606 }
2607 /* Else fall through and trigger a guru. */
2608 RT_FALL_THRU();
2609
2610 case VERR_VMM_RING0_ASSERTION:
2611 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2612 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2613 break;
2614
2615 /*
2616 * Any error code showing up here other than the ones we
2617 * know and process above are considered to be FATAL.
2618 *
2619 * Unknown warnings and informational status codes are also
2620 * included in this.
2621 */
2622 default:
2623 if (RT_SUCCESS_NP(rc))
2624 {
2625 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2626 rc = VERR_EM_INTERNAL_ERROR;
2627 }
2628 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2629 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2630 break;
2631 }
2632
2633 /*
2634 * Act on state transition.
2635 */
2636 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2637 if (enmOldState != enmNewState)
2638 {
2639 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2640
2641 /* Clear MWait flags and the unhalt FF. */
2642 if ( enmOldState == EMSTATE_HALTED
2643 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2644 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2645 && ( enmNewState == EMSTATE_RAW
2646 || enmNewState == EMSTATE_HM
2647 || enmNewState == EMSTATE_NEM
2648 || enmNewState == EMSTATE_REM
2649 || enmNewState == EMSTATE_IEM_THEN_REM
2650 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2651 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2652 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2653 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2654 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2655 {
2656 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2657 {
2658 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2659 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2660 }
2661 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2662 {
2663 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2664 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2665 }
2666 }
2667 }
2668 else
2669 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2670
2671 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2672 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2673
2674 /*
2675 * Act on the new state.
2676 */
2677 switch (enmNewState)
2678 {
2679 /*
2680 * Execute raw.
2681 */
2682 case EMSTATE_RAW:
2683#ifdef VBOX_WITH_RAW_MODE
2684 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2685#else
2686 AssertLogRelMsgFailed(("%Rrc\n", rc));
2687 rc = VERR_EM_INTERNAL_ERROR;
2688#endif
2689 break;
2690
2691 /*
2692 * Execute hardware accelerated raw.
2693 */
2694 case EMSTATE_HM:
2695 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2696 break;
2697
2698 /*
2699 * Execute via the native execution manager (NEM).
2700 */
2701 case EMSTATE_NEM:
2702 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2703 break;
2704
2705 /*
2706 * Execute recompiled.
2707 */
2708 case EMSTATE_REM:
2709 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2710 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2711 break;
2712
2713 /*
2714 * Execute in the interpreter.
2715 */
2716 case EMSTATE_IEM:
2717 {
2718#if 0 /* For testing purposes. */
2719 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2720 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2721 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2722 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2723 rc = VINF_SUCCESS;
2724 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2725#endif
2726 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
2727 if (pVM->em.s.fIemExecutesAll)
2728 {
2729 Assert(rc != VINF_EM_RESCHEDULE_REM);
2730 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2731 Assert(rc != VINF_EM_RESCHEDULE_HM);
2732 }
2733 fFFDone = false;
2734 break;
2735 }
2736
2737 /*
2738 * Execute in IEM, hoping we can quickly switch back to HM
2739 * or RAW execution. If our hopes fail, we go to REM.
2740 */
2741 case EMSTATE_IEM_THEN_REM:
2742 {
2743 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2744 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2745 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2746 break;
2747 }
2748
2749 /*
2750 * Application processor execution halted until SIPI.
2751 */
2752 case EMSTATE_WAIT_SIPI:
2753 /* no break */
2754 /*
2755 * hlt - execution halted until interrupt.
2756 */
2757 case EMSTATE_HALTED:
2758 {
2759 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2760 /* If HM (or someone else) stores a pending interrupt in
2761 TRPM, it must be dispatched ASAP without any halting.
2762 Anything pending in TRPM has been accepted and the CPU
2763 should already be in the right state to receive it. */
2764 if (TRPMHasTrap(pVCpu))
2765 rc = VINF_EM_RESCHEDULE;
2766 /* MWAIT has a special extension where it's woken up when
2767 an interrupt is pending even when IF=0. */
2768 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2769 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2770 {
2771 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2772 if (rc == VINF_SUCCESS)
2773 {
2774 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2775 APICUpdatePendingInterrupts(pVCpu);
2776
2777 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2778 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2779 {
2780 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2781 rc = VINF_EM_RESCHEDULE;
2782 }
2783 }
2784 }
2785 else
2786 {
2787 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2788 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2789 check VMCPU_FF_UPDATE_APIC here. */
2790 if ( rc == VINF_SUCCESS
2791 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2792 {
2793 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2794 rc = VINF_EM_RESCHEDULE;
2795 }
2796 }
2797
2798 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2799 break;
2800 }
2801
2802 /*
2803 * Suspended - return to VM.cpp.
2804 */
2805 case EMSTATE_SUSPENDED:
2806 TMR3NotifySuspend(pVM, pVCpu);
2807 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2808 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2809 return VINF_EM_SUSPEND;
2810
2811 /*
2812 * Debugging in the guest.
2813 */
2814 case EMSTATE_DEBUG_GUEST_RAW:
2815 case EMSTATE_DEBUG_GUEST_HM:
2816 case EMSTATE_DEBUG_GUEST_NEM:
2817 case EMSTATE_DEBUG_GUEST_IEM:
2818 case EMSTATE_DEBUG_GUEST_REM:
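                /* Pause the virtual clock while the debugger owns the vCPU, hand
                   the current status to the debug loop, and restart the clock
                   afterwards. */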
2819 TMR3NotifySuspend(pVM, pVCpu);
2820 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2821 TMR3NotifyResume(pVM, pVCpu);
2822 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2823 break;
2824
2825 /*
2826 * Debugging in the hypervisor.
2827 */
2828 case EMSTATE_DEBUG_HYPER:
2829 {
2830 TMR3NotifySuspend(pVM, pVCpu);
2831 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2832
2833 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2834 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2835 if (rc != VINF_SUCCESS)
2836 {
2837 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2838 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2839 else
2840 {
2841 /* switch to guru meditation mode */
2842 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2843 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2844 VMMR3FatalDump(pVM, pVCpu, rc);
2845 }
2846 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2847 return rc;
2848 }
2849
2850 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2851 TMR3NotifyResume(pVM, pVCpu);
2852 break;
2853 }
2854
2855 /*
2856 * Guru meditation takes place in the debugger.
2857 */
2858 case EMSTATE_GURU_MEDITATION:
2859 {
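                /* Stop the clock, flag the guru meditation (which notifies the
                   other EMTs), dump the fatal state, and enter the debug loop
                   before returning. */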
2860 TMR3NotifySuspend(pVM, pVCpu);
2861 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2862 VMMR3FatalDump(pVM, pVCpu, rc);
2863 emR3Debug(pVM, pVCpu, rc);
2864 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2865 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2866 return rc;
2867 }
2868
2869 /*
2870 * The states we don't expect here.
2871 */
2872 case EMSTATE_NONE:
2873 case EMSTATE_TERMINATING:
2874 default:
2875 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2876 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2877 TMR3NotifySuspend(pVM, pVCpu);
2878 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2879 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2880 return VERR_EM_INTERNAL_ERROR;
2881 }
2882 } /* The Outer Main Loop */
2883 }
2884 else
2885 {
2886 /*
2887 * Fatal error.
2888 */
2889 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2890 TMR3NotifySuspend(pVM, pVCpu);
2891 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2892 VMMR3FatalDump(pVM, pVCpu, rc);
2893 emR3Debug(pVM, pVCpu, rc);
2894 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2895 /** @todo change the VM state! */
2896 return rc;
2897 }
2898
2899 /* not reached */
2900}
2901
2902/**
2903 * Notify EM that the VM is being suspended (used by FTM).
2904 *
2905 * @param pVM The cross context VM structure.
2906 */
2907VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
2908{
2909 PVMCPU pVCpu = VMMGetCpu(pVM);
2910
2911 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
2912 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
2913 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2914 return VINF_SUCCESS;
2915}
2916
2917/**
2918 * Notify EM of a state change (used by FTM)
2919 *
2920 * @param pVM The cross context VM structure.
2921 */
2922VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
2923{
2924 PVMCPU pVCpu = VMMGetCpu(pVM);
2925 EMSTATE enmCurState = pVCpu->em.s.enmState;
2926
2927 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
2928 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2929 pVCpu->em.s.enmPrevState = enmCurState;
2930 return VINF_SUCCESS;
2931}
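
/*
 * A minimal usage sketch of the suspend/resume notification pair.  The caller
 * shown is hypothetical; the real user is the FTM sync code, which is not
 * shown here.  Both calls are presumably made on the EMT, since VMMGetCpu()
 * resolves the vCPU of the calling thread:
 *
 *     EMR3NotifySuspend(pVM);   // stop the virtual clock, park EM in EMSTATE_SUSPENDED
 *     // ... checkpoint / copy the VM state ...
 *     EMR3NotifyResume(pVM);    // restart the clock, restore the previous EM state
 */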