VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@71289

Last change on this file since 71289 was 71020, checked in by vboxsync, 7 years ago

NEM: More code. bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 133.3 KB
/* $Id: EM.cpp 71020 2018-02-14 22:46:51Z vboxsync $ */
/** @file
 * EM - Execution Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_em EM - The Execution Monitor / Manager
 *
 * The Execution Monitor/Manager is responsible for running the VM, scheduling
 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
 * Interpreted), and keeping the CPU states in sync. The function
 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
 * emR3RemExecute).
 *
 * The interpreted execution is only used to avoid switching between
 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
 * The interpretation is thus implemented as part of EM.
 *
 * @see grp_em
 */

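/* Illustrative sketch (not part of the original EM.cpp): the scheduling
 * described above conceptually reduces to a per-VCPU state switch inside
 * the EMR3ExecuteVM() outer loop. The case list below is an assumption
 * inferred from the names used elsewhere in this file:
 *
 *     for (;;)
 *     {
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_RAW: rc = emR3RawExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_HM:  rc = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
 *             case EMSTATE_REM: rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *             // ... halted, suspended, debug and guru states ...
 *         }
 *         // process rc and pending forced actions, possibly picking a new state
 *     }
 */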

/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#include <VBox/vmm/em.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include <VBox/vmm/apic.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmqueue.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/patm.h>
#include "EMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include "VMMTracing.h"

#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/stream.h>
#include <iprt/thread.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
#define EM_NOTIFY_HM
#endif


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
static const char *emR3GetStateName(EMSTATE enmState);
#endif
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
#if defined(VBOX_WITH_REM) || defined(DEBUG)
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
#endif
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);

/**
 * Initializes the EM.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
{
    LogFlow(("EMR3Init\n"));
    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, em.s, 32);
    AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
    AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));

    /*
     * Init the structure.
     */
    pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
    PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
    PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");

    bool fEnabled;
    int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileUser = !fEnabled;

    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileSupervisor = !fEnabled;

#ifdef VBOX_WITH_RAW_RING1
    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
    AssertLogRelRCReturn(rc, rc);
#else
    pVM->fRawRing1Enabled = false; /* Disabled by default. */
#endif

    rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
    AssertLogRelRCReturn(rc, rc);

    rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
    AssertLogRelRCReturn(rc, rc);
    pVM->em.s.fGuruOnTripleFault = !fEnabled;
    if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
    {
        LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
        pVM->em.s.fGuruOnTripleFault = true;
    }

    LogRel(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
            pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));

#ifdef VBOX_WITH_REM
    /*
     * Initialize the REM critical section.
     */
    AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
    rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
    AssertRCReturn(rc, rc);
#endif

    /*
     * Saved state.
     */
    rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
                               NULL, NULL, NULL,
                               NULL, emR3Save, NULL,
                               NULL, emR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
        pVCpu->em.s.enmPrevState = EMSTATE_NONE;
        pVCpu->em.s.fForceRAW = false;

        pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
#ifdef VBOX_WITH_RAW_MODE
        if (VM_IS_RAW_MODE_ENABLED(pVM))
        {
            pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
            AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
        }
#endif

        /* Force reset of the time slice. */
        pVCpu->em.s.u64TimeSliceStart = 0;

# define EM_REG_COUNTER(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_COUNTER_USED(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE_ADV(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

        /*
         * Statistics.
         */
#ifdef VBOX_WITH_STATISTICS
        PEMSTATS pStats;
        rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->em.s.pStatsR3 = pStats;
        pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
        pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);

        EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
        EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");

        EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
        EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");

        EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
        EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of an instruction prefix.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of an instruction prefix.");

        EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
        EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
        EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");

        EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
        pVCpu->em.s.pCliStatTree = 0;

        /* these should be considered for release statistics. */
        EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
        EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%d/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%d/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%d/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
        EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%d/EM/NEMEnter", "Profiling NEM entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%d/EM/NEMExec", "Profiling NEM execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%d/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");

#endif /* VBOX_WITH_STATISTICS */

        EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
        EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
        EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");

        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
    }

    emR3InitDbg(pVM);
    return VINF_SUCCESS;
}


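/* Illustrative note (not part of the original file): the CFGM keys queried in
 * EMR3Init() above sit under the VM's configuration root ("RawR3Enabled",
 * "RawR0Enabled") or under its /EM child node ("IemExecutesAll",
 * "TripleFaultReset"). From the VBoxManage front-end such nodes are
 * conventionally reached through the "VBoxInternal" extradata prefix; treat
 * the exact key paths below as assumptions to verify against the CFGM
 * documentation:
 *
 *     VBoxManage setextradata "MyVM" VBoxInternal/EM/IemExecutesAll 1
 *     VBoxManage setextradata "MyVM" VBoxInternal/EM/TripleFaultReset 1
 */

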
/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM         The cross context VM structure.
 */
VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
{
    LogFlow(("EMR3Relocate\n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        if (pVCpu->em.s.pStatsR3)
            pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
    }
}


/**
 * Reset the EM state for a CPU.
 *
 * Called by EMR3Reset and hot plugging.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
{
    /* Reset scheduling state. */
    pVCpu->em.s.fForceRAW = false;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);

    /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
       out of the HALTED state here so that enmPrevState doesn't end up as
       HALTED when EMR3Execute returns. */
    if (pVCpu->em.s.enmState == EMSTATE_HALTED)
    {
        Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
        pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
    }
}


/**
 * Reset notification.
 *
 * @param   pVM         The cross context VM structure.
 */
VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
{
    Log(("EMR3Reset: \n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        EMR3ResetCpu(&pVM->aCpus[i]);
}


/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources; the VM itself is
 * at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
{
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

#ifdef VBOX_WITH_REM
    PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
#else
    RT_NOREF(pVM);
#endif
    return VINF_SUCCESS;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        SSM operation handle.
 */
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
        AssertRCReturn(rc, rc);

        Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
        Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
        AssertRCReturn(rc, rc);

        /* Save mwait state. */
        rc = SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
        AssertRCReturn(rc, rc);
        rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}


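/* Illustrative sketch (not part of the original file): the per-VCPU record
 * emR3Save() writes and emR3Load() reads back, in stream order; the grouping
 * below is an illustration, not a structure used by the code:
 *
 *     bool      fForceRAW;       // SSMR3PutBool
 *     uint32_t  enmPrevState;    // SSMR3PutU32, an EMSTATE value
 *     uint32_t  MWait.fWait;     // SSMR3PutU32
 *     RTGCPTR   MWait.uMWaitRAX, MWait.uMWaitRCX;    // SSMR3PutGCPtr each
 *     RTGCPTR   MWait.uMonitorRAX, MWait.uMonitorRCX, MWait.uMonitorRDX;
 */

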
/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    /*
     * Validate version.
     */
    if (    uVersion > EM_SAVED_STATE_VERSION
        ||  uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
    {
        AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Load the saved state.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;
        AssertRCReturn(rc, rc);

        if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
        {
            AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
            rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
            AssertRCReturn(rc, rc);
            Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);

            pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
        }
        if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
        {
            /* Load mwait state. */
            rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
            AssertRCReturn(rc, rc);
        }

        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return VINF_SUCCESS;
}


/**
 * Argument packet for emR3SetExecutionPolicy.
 */
struct EMR3SETEXECPOLICYARGS
{
    EMEXECPOLICY enmPolicy;
    bool         fEnforce;
};


/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
 */
static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /*
     * Only the first CPU changes the variables.
     */
    if (pVCpu->idCpu == 0)
    {
        struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
        switch (pArgs->enmPolicy)
        {
            case EMEXECPOLICY_RECOMPILE_RING0:
                pVM->fRecompileSupervisor = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_RECOMPILE_RING3:
                pVM->fRecompileUser = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_IEM_ALL:
                pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
                break;
            default:
                AssertFailedReturn(VERR_INVALID_PARAMETER);
        }
        LogRel(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
                pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
    }

    /*
     * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
     */
    return pVCpu->em.s.enmState == EMSTATE_RAW
        || pVCpu->em.s.enmState == EMSTATE_HM
        || pVCpu->em.s.enmState == EMSTATE_NEM
        || pVCpu->em.s.enmState == EMSTATE_IEM
        || pVCpu->em.s.enmState == EMSTATE_REM
        || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
         ? VINF_EM_RESCHEDULE
         : VINF_SUCCESS;
}


/**
 * Changes an execution scheduling policy parameter.
 *
 * This is used to enable or disable raw-mode / hardware-virtualization
 * execution of user and supervisor code.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to change.
 * @param   fEnforce    Whether to enforce the policy or not.
 */
VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);

    struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
    return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
}


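/* Illustrative usage sketch (not part of the original file): a front-end
 * forcing all guest code through IEM, assuming a valid PUVM handle. The
 * rendezvous above may hand back VINF_EM_RESCHEDULE when an EMT has to leave
 * its current execution mode (fEnforce = true):
 *
 *     int rc2 = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *     if (rc2 == VINF_EM_RESCHEDULE)
 *         ; // EMTs will reschedule into IEM on their next loop iteration
 *     else
 *         AssertRC(rc2);
 */

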
/**
 * Queries an execution scheduling policy parameter.
 *
 * @returns VBox status code
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to query.
 * @param   pfEnforced  Where to return the current value.
 */
VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
{
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /* No need to bother EMTs with a query. */
    switch (enmPolicy)
    {
        case EMEXECPOLICY_RECOMPILE_RING0:
            *pfEnforced = pVM->fRecompileSupervisor;
            break;
        case EMEXECPOLICY_RECOMPILE_RING3:
            *pfEnforced = pVM->fRecompileUser;
            break;
        case EMEXECPOLICY_IEM_ALL:
            *pfEnforced = pVM->em.s.fIemExecutesAll;
            break;
        default:
            AssertFailedReturn(VERR_INTERNAL_ERROR_2);
    }

    return VINF_SUCCESS;
}


/**
 * Raise a fatal error.
 *
 * Safely terminate the VM with full state report and stuff. This function
 * will naturally never return.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   rc          VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
}


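/* Context sketch (not part of the original file): the longjmp above unwinds
 * to a setjmp armed in EMR3ExecuteVM (outside this excerpt), roughly:
 *
 *     int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
 *     if (rc == 0)
 *         ; // enter the normal state-machine loop
 *     else
 *         ; // rc is the fatal status passed to EMR3FatalError
 */

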
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
/**
 * Gets the EM state name.
 *
 * @returns Pointer to a read-only state name.
 * @param   enmState    The state.
 */
static const char *emR3GetStateName(EMSTATE enmState)
{
    switch (enmState)
    {
        case EMSTATE_NONE:              return "EMSTATE_NONE";
        case EMSTATE_RAW:               return "EMSTATE_RAW";
        case EMSTATE_HM:                return "EMSTATE_HM";
        case EMSTATE_IEM:               return "EMSTATE_IEM";
        case EMSTATE_REM:               return "EMSTATE_REM";
        case EMSTATE_HALTED:            return "EMSTATE_HALTED";
        case EMSTATE_WAIT_SIPI:         return "EMSTATE_WAIT_SIPI";
        case EMSTATE_SUSPENDED:         return "EMSTATE_SUSPENDED";
        case EMSTATE_TERMINATING:       return "EMSTATE_TERMINATING";
        case EMSTATE_DEBUG_GUEST_RAW:   return "EMSTATE_DEBUG_GUEST_RAW";
        case EMSTATE_DEBUG_GUEST_HM:    return "EMSTATE_DEBUG_GUEST_HM";
        case EMSTATE_DEBUG_GUEST_IEM:   return "EMSTATE_DEBUG_GUEST_IEM";
        case EMSTATE_DEBUG_GUEST_REM:   return "EMSTATE_DEBUG_GUEST_REM";
        case EMSTATE_DEBUG_HYPER:       return "EMSTATE_DEBUG_HYPER";
        case EMSTATE_GURU_MEDITATION:   return "EMSTATE_GURU_MEDITATION";
        case EMSTATE_IEM_THEN_REM:      return "EMSTATE_IEM_THEN_REM";
        case EMSTATE_NEM:               return "EMSTATE_NEM";
        case EMSTATE_DEBUG_GUEST_NEM:   return "EMSTATE_DEBUG_GUEST_NEM";
        default:                        return "Unknown!";
    }
}
#endif /* LOG_ENABLED || VBOX_STRICT */


/**
 * Debug loop.
 *
 * @returns VBox status code for EM.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      Current EM VBox status code.
 */
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
        const VBOXSTRICTRC rcLast = rc;

        /*
         * Debug related RC.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (   pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    || pVCpu->em.s.fForceRAW /* paranoia */)
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawStep(pVM, pVCpu);
#else
                    AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
#endif
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
                    rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
                    rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
#ifdef VBOX_WITH_REM
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
                    rc = emR3RemStep(pVM, pVCpu);
#endif
                else
                {
                    rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
                    if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
                        rc = VINF_EM_DBG_STEPPED;
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_EVENT:
                rc = DBGFR3EventHandlePending(pVM, pVCpu);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                RTLogFlush(NULL);
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;
            case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special errors code! */
            {
                LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
            }
        }

        /*
         * Process the result.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Continue the debugging loop.
             */
            case VINF_EM_DBG_STEP:
            case VINF_EM_DBG_STOP:
            case VINF_EM_DBG_EVENT:
            case VINF_EM_DBG_STEPPED:
            case VINF_EM_DBG_BREAKPOINT:
            case VINF_EM_DBG_HYPER_STEPPED:
            case VINF_EM_DBG_HYPER_BREAKPOINT:
            case VINF_EM_DBG_HYPER_ASSERTION:
                break;

            /*
             * Resuming execution (in some form) has to be done here if we got
             * a hypervisor debug event.
             */
            case VINF_SUCCESS:
            case VINF_EM_RESUME:
            case VINF_EM_SUSPEND:
            case VINF_EM_RESCHEDULE:
            case VINF_EM_RESCHEDULE_RAW:
            case VINF_EM_RESCHEDULE_REM:
            case VINF_EM_HALT:
                if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                {
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawResumeHyper(pVM, pVCpu);
                    if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                        continue;
#else
                    AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
#endif
                }
                if (rc == VINF_SUCCESS)
                    rc = VINF_EM_RESCHEDULE;
                return rc;

            /*
             * The debugger isn't attached.
             * We'll simply turn the thing off since that's the easiest thing to do.
             */
            case VERR_DBGF_NOT_ATTACHED:
                switch (VBOXSTRICTRC_VAL(rcLast))
                {
                    case VINF_EM_DBG_HYPER_STEPPED:
                    case VINF_EM_DBG_HYPER_BREAKPOINT:
                    case VINF_EM_DBG_HYPER_ASSERTION:
                    case VERR_TRPM_PANIC:
                    case VERR_TRPM_DONT_PANIC:
                    case VERR_VMM_RING0_ASSERTION:
                    case VERR_VMM_HYPER_CR3_MISMATCH:
                    case VERR_VMM_RING3_CALL_DISABLED:
                        return rcLast;
                }
                return VINF_EM_OFF;

            /*
             * Status codes terminating the VM in one or another sense.
             */
            case VINF_EM_TERMINATE:
            case VINF_EM_OFF:
            case VINF_EM_RESET:
            case VINF_EM_NO_MEMORY:
            case VINF_EM_RAW_STALE_SELECTOR:
            case VINF_EM_RAW_IRET_TRAP:
            case VERR_TRPM_PANIC:
            case VERR_TRPM_DONT_PANIC:
            case VERR_IEM_INSTR_NOT_IMPLEMENTED:
            case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
            case VERR_VMM_RING0_ASSERTION:
            case VERR_VMM_HYPER_CR3_MISMATCH:
            case VERR_VMM_RING3_CALL_DISABLED:
            case VERR_INTERNAL_ERROR:
            case VERR_INTERNAL_ERROR_2:
            case VERR_INTERNAL_ERROR_3:
            case VERR_INTERNAL_ERROR_4:
            case VERR_INTERNAL_ERROR_5:
            case VERR_IPE_UNEXPECTED_STATUS:
            case VERR_IPE_UNEXPECTED_INFO_STATUS:
            case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                return rc;

            /*
             * The rest is unexpected, and will keep us here.
             */
            default:
                AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
                break;
        }
    } /* debug forever */
}


#if defined(VBOX_WITH_REM) || defined(DEBUG)
/**
 * Steps recompiled code.
 *
 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
 *          VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
    Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

# ifdef VBOX_WITH_REM
    EMRemLock(pVM);

    /*
     * Switch to REM, step instruction, switch back.
     */
    int rc = REMR3State(pVM, pVCpu);
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);
        REMR3StateBack(pVM, pVCpu);
    }
    EMRemUnlock(pVM);

# else
    int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
# endif

    Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
    return rc;
}
#endif /* VBOX_WITH_REM || DEBUG */


#ifdef VBOX_WITH_REM
/**
 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
 * critical section.
 *
 * @returns false - new fInREMState value.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
{
    STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
    REMR3StateBack(pVM, pVCpu);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);

    EMRemUnlock(pVM);
    return false;
}
#endif

1064
1065/**
1066 * Executes recompiled code.
1067 *
1068 * This function contains the recompiler version of the inner
1069 * execution loop (the outer loop being in EMR3ExecuteVM()).
1070 *
1071 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1072 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1073 *
1074 * @param pVM The cross context VM structure.
1075 * @param pVCpu The cross context virtual CPU structure.
1076 * @param pfFFDone Where to store an indicator telling whether or not
1077 * FFs were done before returning.
1078 *
1079 */
1080static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1081{
1082#ifdef LOG_ENABLED
1083 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1084 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1085
1086 if (pCtx->eflags.Bits.u1VM)
1087 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
1088 else
1089 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
1090#endif
1091 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1092
1093#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1094 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1095 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1096 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1097#endif
1098
1099 /*
1100 * Spin till we get a forced action which returns anything but VINF_SUCCESS,
1101 * or until REM suggests raw-mode execution.
1102 */
1103 *pfFFDone = false;
1104#ifdef VBOX_WITH_REM
1105 bool fInREMState = false;
1106#else
1107 uint32_t cLoops = 0;
1108#endif
1109 int rc = VINF_SUCCESS;
1110 for (;;)
1111 {
1112#ifdef VBOX_WITH_REM
1113 /*
1114 * Lock REM and update the state if not already in sync.
1115 *
1116 * Note! Big lock, but you are not supposed to own any lock when
1117 * coming in here.
1118 */
1119 if (!fInREMState)
1120 {
1121 EMRemLock(pVM);
1122 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1123
1124 /* Flush the recompiler translation blocks if the VCPU has changed,
1125 and force a full CPU state resync. */
1126 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1127 {
1128 REMFlushTBs(pVM);
1129 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1130 }
1131 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1132
1133 rc = REMR3State(pVM, pVCpu);
1134
1135 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1136 if (RT_FAILURE(rc))
1137 break;
1138 fInREMState = true;
1139
1140 /*
1141 * We might have missed the raising of VMREQ, TIMER and some other
1142 * important FFs while we were busy switching the state. So, check again.
1143 */
1144 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1145 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1146 {
1147 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1148 goto l_REMDoForcedActions;
1149 }
1150 }
1151#endif
1152
1153 /*
1154 * Execute REM.
1155 */
1156 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1157 {
1158 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1159#ifdef VBOX_WITH_REM
1160 rc = REMR3Run(pVM, pVCpu);
1161#else
1162 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
1163#endif
1164 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1165 }
1166 else
1167 {
1168 /* Give up this time slice; virtual time continues */
1169 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1170 RTThreadSleep(5);
1171 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1172 rc = VINF_SUCCESS;
1173 }
1174
1175 /*
1176 * Deal with high priority post execution FFs before doing anything
1177 * else. Sync back the state and leave the lock to be on the safe side.
1178 */
1179 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1180 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1181 {
1182#ifdef VBOX_WITH_REM
1183 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1184#endif
1185 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1186 }
1187
1188 /*
1189 * Process the returned status code.
1190 */
1191 if (rc != VINF_SUCCESS)
1192 {
1193 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1194 break;
1195 if (rc != VINF_REM_INTERRUPED_FF)
1196 {
1197#ifndef VBOX_WITH_REM
1198 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1199 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1200 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1201 {
1202 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1203 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1204 {
1205 rc = VINF_EM_RESCHEDULE;
1206 break;
1207 }
1208 }
1209#endif
1210
1211 /*
1212 * Anything which is not known to us means an internal error
1213 * and the termination of the VM!
1214 */
1215 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1216 break;
1217 }
1218 }
1219
1220
1221 /*
1222 * Check and execute forced actions.
1223 *
1224 * Sync back the VM state and leave the lock before calling any of
1225 * these; you never know what's going to happen here.
1226 */
1227#ifdef VBOX_HIGH_RES_TIMERS_HACK
1228 TMTimerPollVoid(pVM, pVCpu);
1229#endif
1230 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1231 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1232 || VMCPU_FF_IS_PENDING(pVCpu,
1233 VMCPU_FF_ALL_REM_MASK
1234 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1235 {
1236#ifdef VBOX_WITH_REM
1237l_REMDoForcedActions:
1238 if (fInREMState)
1239 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1240#endif
1241 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1242 rc = emR3ForcedActions(pVM, pVCpu, rc);
1243 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1244 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1245 if ( rc != VINF_SUCCESS
1246 && rc != VINF_EM_RESCHEDULE_REM)
1247 {
1248 *pfFFDone = true;
1249 break;
1250 }
1251 }
1252
1253#ifndef VBOX_WITH_REM
1254 /*
1255 * Have to check if we can get back to fast execution mode every so often.
1256 */
1257 if (!(++cLoops & 7))
1258 {
1259 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1260 if ( enmCheck != EMSTATE_REM
1261 && enmCheck != EMSTATE_IEM_THEN_REM)
1262 return VINF_EM_RESCHEDULE;
1263 }
1264#endif
1265
1266 } /* The Inner Loop, recompiled execution mode version. */
1267
1268
1269#ifdef VBOX_WITH_REM
1270 /*
1271 * Returning. Sync back the VM state if required.
1272 */
1273 if (fInREMState)
1274 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1275#endif
1276
1277 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1278 return rc;
1279}
1280
1281
1282#ifdef DEBUG
1283
1284int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1285{
1286 EMSTATE enmOldState = pVCpu->em.s.enmState;
1287
1288 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1289
1290 Log(("Single step BEGIN:\n"));
1291 for (uint32_t i = 0; i < cIterations; i++)
1292 {
1293 DBGFR3PrgStep(pVCpu);
1294 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1295 emR3RemStep(pVM, pVCpu);
1296 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1297 break;
1298 }
1299 Log(("Single step END:\n"));
1300 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1301 pVCpu->em.s.enmState = enmOldState;
1302 return VINF_EM_RESCHEDULE;
1303}
1304
1305#endif /* DEBUG */
1306
1307
1308/**
1309 * Try to execute the problematic code in IEM first, then fall back on REM if
1310 * there is too much of it or if IEM doesn't implement something.
1311 *
1312 * @returns Strict VBox status code from IEMExecLots.
1313 * @param pVM The cross context VM structure.
1314 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1315 * @param pfFFDone Force flags done indicator.
1316 *
1317 * @thread EMT(pVCpu)
1318 */
1319static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1320{
1321 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1322 *pfFFDone = false;
1323
1324 /*
1325 * Execute in IEM for a while (bounded by the 1024 instruction budget below).
1326 */
1327 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1328 {
1329 uint32_t cInstructions;
1330 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
1331 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1332 if (rcStrict != VINF_SUCCESS)
1333 {
1334 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1335 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1336 break;
1337
1338 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1339 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1340 return rcStrict;
1341 }
1342
1343 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1344 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1345 {
1346 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1347 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1348 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1349 pVCpu->em.s.enmState = enmNewState;
1350 return VINF_SUCCESS;
1351 }
1352
1353 /*
1354 * Check for pending actions.
1355 */
1356 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1357 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1358 return VINF_SUCCESS;
1359 }
1360
1361 /*
1362 * Switch to REM.
1363 */
1364 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1365 pVCpu->em.s.enmState = EMSTATE_REM;
1366 return VINF_SUCCESS;
1367}
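
/* Design note (informal, not from the original sources): the 1024-instruction
   budget in emR3ExecuteIemThenRem above is a heuristic trade-off -- long
   enough to step over short stretches of code that HM/raw-mode cannot handle,
   short enough that persistently unsuitable code is handed over to the
   recompiler (EMSTATE_REM) rather than looping in the slower per-instruction
   IEM interpreter. */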
1368
1369
1370/**
1371 * Decides whether to execute RAW, HWACC or REM.
1372 *
1373 * @returns new EM state
1374 * @param pVM The cross context VM structure.
1375 * @param pVCpu The cross context virtual CPU structure.
1376 * @param pCtx Pointer to the guest CPU context.
1377 */
1378EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1379{
1380 /*
1381 * When forcing raw-mode execution, things are simple.
1382 */
1383 if (pVCpu->em.s.fForceRAW)
1384 return EMSTATE_RAW;
1385
1386 /*
1387 * We stay in the wait for SIPI state unless explicitly told otherwise.
1388 */
1389 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1390 return EMSTATE_WAIT_SIPI;
1391
1392 /*
1393 * Execute everything in IEM?
1394 */
1395 if (pVM->em.s.fIemExecutesAll)
1396 return EMSTATE_IEM;
1397
1398 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1399 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1400 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1401
1402 X86EFLAGS EFlags = pCtx->eflags;
1403 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1404 {
1405 if (EMIsHwVirtExecutionEnabled(pVM))
1406 {
1407 if (VM_IS_HM_ENABLED(pVM))
1408 {
1409 if (HMR3CanExecuteGuest(pVM, pCtx))
1410 return EMSTATE_HM;
1411 }
1412 else if (NEMR3CanExecuteGuest(pVM, pVCpu, pCtx))
1413 return EMSTATE_NEM;
1414
1415 /*
1416 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1417 * turns off monitoring features essential for raw mode!
1418 */
1419 return EMSTATE_IEM_THEN_REM;
1420 }
1421 }
1422
1423 /*
1424 * Standard raw-mode:
1425 *
1426 * Here we only support 16 and 32 bit protected mode ring-3 code with no I/O privileges,
1427 * or 32 bit protected mode ring-0 code.
1428 *
1429 * The tests are ordered by the likelihood of being true during normal execution.
1430 */
1431 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1432 {
1433 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1434 return EMSTATE_REM;
1435 }
1436
1437# ifndef VBOX_RAW_V86
1438 if (EFlags.u32 & X86_EFL_VM) {
1439 Log2(("raw mode refused: VM_MASK\n"));
1440 return EMSTATE_REM;
1441 }
1442# endif
1443
1444 /** @todo check the X86_CR0_AM flag with respect to raw mode!!! We're probably not emulating it right! */
1445 uint32_t u32CR0 = pCtx->cr0;
1446 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1447 {
1448 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1449 return EMSTATE_REM;
1450 }
1451
1452 if (pCtx->cr4 & X86_CR4_PAE)
1453 {
1454 uint32_t u32Dummy, u32Features;
1455
1456 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1457 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1458 return EMSTATE_REM;
1459 }
1460
1461 unsigned uSS = pCtx->ss.Sel;
1462 if ( pCtx->eflags.Bits.u1VM
1463 || (uSS & X86_SEL_RPL) == 3)
1464 {
1465 if (!EMIsRawRing3Enabled(pVM))
1466 return EMSTATE_REM;
1467
1468 if (!(EFlags.u32 & X86_EFL_IF))
1469 {
1470 Log2(("raw mode refused: IF (RawR3)\n"));
1471 return EMSTATE_REM;
1472 }
1473
1474 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1475 {
1476 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1477 return EMSTATE_REM;
1478 }
1479 }
1480 else
1481 {
1482 if (!EMIsRawRing0Enabled(pVM))
1483 return EMSTATE_REM;
1484
1485 if (EMIsRawRing1Enabled(pVM))
1486 {
1487 /* Only ring 0 and 1 supervisor code. */
1488 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1489 {
1490 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1491 return EMSTATE_REM;
1492 }
1493 }
1494 /* Only ring 0 supervisor code. */
1495 else if ((uSS & X86_SEL_RPL) != 0)
1496 {
1497 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1498 return EMSTATE_REM;
1499 }
1500
1501 // Let's start with pure 32-bit ring-0 code first.
1502 /** @todo What's pure 32-bit mode? flat? */
1503 if ( !(pCtx->ss.Attr.n.u1DefBig)
1504 || !(pCtx->cs.Attr.n.u1DefBig))
1505 {
1506 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1507 return EMSTATE_REM;
1508 }
1509
1510 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1511 if (!(u32CR0 & X86_CR0_WP))
1512 {
1513 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1514 return EMSTATE_REM;
1515 }
1516
1517# ifdef VBOX_WITH_RAW_MODE
1518 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1519 {
1520 Log2(("raw r0 mode forced: patch code\n"));
1521# ifdef VBOX_WITH_SAFE_STR
1522 Assert(pCtx->tr.Sel);
1523# endif
1524 return EMSTATE_RAW;
1525 }
1526# endif /* VBOX_WITH_RAW_MODE */
1527
1528# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1529 if (!(EFlags.u32 & X86_EFL_IF))
1530 {
1531 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1532 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1533 return EMSTATE_REM;
1534 }
1535# endif
1536
1537# ifndef VBOX_WITH_RAW_RING1
1538 /** @todo still necessary??? */
1539 if (EFlags.Bits.u2IOPL != 0)
1540 {
1541 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1542 return EMSTATE_REM;
1543 }
1544# endif
1545 }
1546
1547 /*
1548 * Stale hidden selectors mean raw-mode is unsafe (being very careful).
1549 */
1550 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1551 {
1552 Log2(("raw mode refused: stale CS\n"));
1553 return EMSTATE_REM;
1554 }
1555 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1556 {
1557 Log2(("raw mode refused: stale SS\n"));
1558 return EMSTATE_REM;
1559 }
1560 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1561 {
1562 Log2(("raw mode refused: stale DS\n"));
1563 return EMSTATE_REM;
1564 }
1565 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1566 {
1567 Log2(("raw mode refused: stale ES\n"));
1568 return EMSTATE_REM;
1569 }
1570 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1571 {
1572 Log2(("raw mode refused: stale FS\n"));
1573 return EMSTATE_REM;
1574 }
1575 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1576 {
1577 Log2(("raw mode refused: stale GS\n"));
1578 return EMSTATE_REM;
1579 }
1580
1581# ifdef VBOX_WITH_SAFE_STR
1582 if (pCtx->tr.Sel == 0)
1583 {
1584 Log(("Raw mode refused -> TR=0\n"));
1585 return EMSTATE_REM;
1586 }
1587# endif
1588
1589 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1590 return EMSTATE_RAW;
1591}
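
/* Informal decision summary for emR3Reschedule (a digest, not additional
   logic): fForceRAW -> EMSTATE_RAW; EMSTATE_WAIT_SIPI is sticky;
   fIemExecutesAll -> EMSTATE_IEM. With raw-mode disabled the choice is HM,
   NEM or IEM_THEN_REM, depending on what the HM/NEM backends report they can
   execute. Otherwise the long list of raw-mode suitability checks applies,
   and any failed check sends us to the recompiler (EMSTATE_REM). */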
1592
1593
1594/**
1595 * Executes all high priority post-execution forced actions.
1596 *
1597 * @returns rc or a fatal status code.
1598 *
1599 * @param pVM The cross context VM structure.
1600 * @param pVCpu The cross context virtual CPU structure.
1601 * @param rc The current rc.
1602 */
1603int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1604{
1605 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1606
1607 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1608 PDMCritSectBothFF(pVCpu);
1609
1610 /* Update CR3 (Nested Paging case for HM). */
1611 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1612 {
1613 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1614 if (RT_FAILURE(rc2))
1615 return rc2;
1616 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1617 }
1618
1619 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1620 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1621 {
1622 if (CPUMIsGuestInPAEMode(pVCpu))
1623 {
1624 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1625 AssertPtr(pPdpes);
1626
1627 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1628 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1629 }
1630 else
1631 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1632 }
1633
1634 /* IEM has pending work (typically memory write after INS instruction). */
1635 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
1636 rc = VBOXSTRICTRC_TODO(IEMR3ProcessForceFlag(pVM, pVCpu, rc));
1637
1638 /* IOM has pending work (committing an I/O or MMIO write). */
1639 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
1640 rc = VBOXSTRICTRC_TODO(IOMR3ProcessForceFlag(pVM, pVCpu, rc));
1641
1642#ifdef VBOX_WITH_RAW_MODE
1643 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1644 CSAMR3DoPendingAction(pVM, pVCpu);
1645#endif
1646
1647 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1648 {
1649 if ( rc > VINF_EM_NO_MEMORY
1650 && rc <= VINF_EM_LAST)
1651 rc = VINF_EM_NO_MEMORY;
1652 }
1653
1654 return rc;
1655}
1656
1657#ifdef VBOX_WITH_NESTED_HWVIRT
1658/**
1659 * Helper for emR3ForcedActions() for injecting interrupts into the
1660 * nested-guest.
1661 *
1662 * @returns VBox status code.
1663 * @param pVCpu The cross context virtual CPU structure.
1664 * @param pCtx Pointer to the nested-guest CPU context.
1665 * @param pfResched Where to store whether a reschedule is required.
1666 * @param pfInject Where to store whether an interrupt was injected (and if
1667 * a wake up is pending).
1668 */
1669static int emR3NstGstInjectIntr(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfResched, bool *pfInject)
1670{
1671 *pfResched = false;
1672 *pfInject = false;
1673 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1674 {
1675 PVM pVM = pVCpu->CTX_SUFF(pVM);
1676 bool fGif = pCtx->hwvirt.fGif;
1677#ifdef VBOX_WITH_RAW_MODE
1678 fGif &= !PATMIsPatchGCAddr(pVM, pCtx->eip);
1679#endif
1680 if (fGif)
1681 {
1682 if (CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
1683 {
1684 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1685 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1686 {
1687 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
1688 {
1689 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1690 if (RT_SUCCESS(rcStrict))
1691 {
1692 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1693 * doesn't intercept HLT but intercepts INTR? */
1694 *pfResched = true;
1695 return VINF_EM_RESCHEDULE;
1696 }
1697
1698 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1699 return VINF_EM_TRIPLE_FAULT;
1700 }
1701
1702 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1703 /** @todo this really isn't nice, should properly handle this */
1704 int rc = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1705 if (pVM->em.s.fIemExecutesAll && ( rc == VINF_EM_RESCHEDULE_REM
1706 || rc == VINF_EM_RESCHEDULE_HM
1707 || rc == VINF_EM_RESCHEDULE_RAW))
1708 {
1709 rc = VINF_EM_RESCHEDULE;
1710 }
1711
1712 *pfResched = true;
1713 *pfInject = true;
1714 return rc;
1715 }
1716 }
1717
1718 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1719 && CPUMCanSvmNstGstTakeVirtIntr(pVCpu, pCtx))
1720 {
1721 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
1722 {
1723 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1724 if (RT_SUCCESS(rcStrict))
1725 {
1726 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1727 * doesn't intercept HLT but intercepts VINTR? */
1728 *pfResched = true;
1729 return VINF_EM_RESCHEDULE;
1730 }
1731
1732 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1733 return VINF_EM_TRIPLE_FAULT;
1734 }
1735
1736 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1737 uint8_t const uNstGstVector = CPUMGetSvmNstGstInterrupt(pCtx);
1738 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR vector %#x\n", uNstGstVector));
1739 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1740 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1741
1742 *pfResched = true;
1743 *pfInject = true;
1744 return VINF_EM_RESCHEDULE;
1745 }
1746 }
1747 return VINF_SUCCESS;
1748 }
1749
1750 if (CPUMIsGuestInVmxNestedHwVirtMode(pCtx))
1751 { /** @todo Nested VMX. */ }
1752
1753 /* Shouldn't really get here. */
1754 AssertMsgFailed(("Unrecognized nested hwvirt. arch!\n"));
1755 return VERR_EM_INTERNAL_ERROR;
1756}
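
/* Note (informal digest of the helper above): for SVM nested guests it first
   checks the global interrupt flag (GIF), then physical interrupts -- if the
   nested guest intercepts INTR, a #VMEXIT is emulated via IEMExecSvmVmexit,
   otherwise the event is injected with TRPMR3InjectEvent and the caller is
   told to reschedule. Virtual interrupts follow the same pattern with the
   VINTR intercept. Nested VMX is not handled yet. */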
1757#endif
1758
1759/**
1760 * Executes all pending forced actions.
1761 *
1762 * Forced actions can cause execution delays and execution
1763 * rescheduling. The first we deal with using action priority, so
1764 * that for instance pending timers aren't scheduled and run until
1765 * right before execution. The rescheduling we deal with using
1766 * return codes. The same goes for VM termination, only in that case
1767 * we exit everything.
1768 *
1769 * @returns VBox status code of equal or greater importance/severity than rc.
1770 * The most important ones are: VINF_EM_RESCHEDULE,
1771 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1772 *
1773 * @param pVM The cross context VM structure.
1774 * @param pVCpu The cross context virtual CPU structure.
1775 * @param rc The current rc.
1776 *
1777 */
1778int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1779{
1780 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1781#ifdef VBOX_STRICT
1782 int rcIrq = VINF_SUCCESS;
1783#endif
1784 int rc2;
1785#define UPDATE_RC() \
1786 do { \
1787 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1788 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1789 break; \
1790 if (!rc || rc2 < rc) \
1791 rc = rc2; \
1792 } while (0)
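    /* Illustrative sketch of the UPDATE_RC() merge semantics, assuming the
       standard VINF_EM_* ordering where more drastic statuses have lower
       (i.e. higher priority) values:

           int rc = VINF_SUCCESS;
           int rc2 = VINF_EM_RESCHEDULE;
           UPDATE_RC();                // rc == VINF_EM_RESCHEDULE
           rc2 = VINF_EM_SUSPEND;
           UPDATE_RC();                // rc == VINF_EM_SUSPEND (lower value wins)
           rc2 = VINF_EM_RESCHEDULE;
           UPDATE_RC();                // rc stays VINF_EM_SUSPEND

       Failure statuses already in rc (rc < VINF_SUCCESS) are never
       overwritten by rc2. */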
1793 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1794
1795 /*
1796 * Post execution chunk first.
1797 */
1798 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1799 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1800 {
1801 /*
1802 * EMT Rendezvous (must be serviced before termination).
1803 */
1804 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1805 {
1806 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1807 UPDATE_RC();
1808 /** @todo HACK ALERT! The following test is to make sure EM+TM
1809 * thinks the VM is stopped/reset before the next VM state change
1810 * is made. We need a better solution for this, or at least make it
1811 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1812 * VINF_EM_SUSPEND). */
1813 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1814 {
1815 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1816 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1817 return rc;
1818 }
1819 }
1820
1821 /*
1822 * State change request (cleared by vmR3SetStateLocked).
1823 */
1824 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1825 {
1826 VMSTATE enmState = VMR3GetState(pVM);
1827 switch (enmState)
1828 {
1829 case VMSTATE_FATAL_ERROR:
1830 case VMSTATE_FATAL_ERROR_LS:
1831 case VMSTATE_GURU_MEDITATION:
1832 case VMSTATE_GURU_MEDITATION_LS:
1833 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1834 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1835 return VINF_EM_SUSPEND;
1836
1837 case VMSTATE_DESTROYING:
1838 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1839 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1840 return VINF_EM_TERMINATE;
1841
1842 default:
1843 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1844 }
1845 }
1846
1847 /*
1848 * Debugger Facility polling.
1849 */
1850 if ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
1851 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
1852 {
1853 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1854 UPDATE_RC();
1855 }
1856
1857 /*
1858 * Postponed reset request.
1859 */
1860 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1861 {
1862 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1863 UPDATE_RC();
1864 }
1865
1866#ifdef VBOX_WITH_RAW_MODE
1867 /*
1868 * CSAM page scanning.
1869 */
1870 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1871 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1872 {
1873 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1874
1875 /** @todo check for 16 or 32 bits code! (D bit in the code selector) */
1876 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1877
1878 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
1879 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1880 }
1881#endif
1882
1883 /*
1884 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1885 */
1886 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1887 {
1888 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1889 UPDATE_RC();
1890 if (rc == VINF_EM_NO_MEMORY)
1891 return rc;
1892 }
1893
1894 /* check that we got them all */
1895 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1896 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
1897 }
1898
1899 /*
1900 * Normal priority then.
1901 * (Executed in no particular order.)
1902 */
1903 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1904 {
1905 /*
1906 * PDM Queues are pending.
1907 */
1908 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1909 PDMR3QueueFlushAll(pVM);
1910
1911 /*
1912 * PDM DMA transfers are pending.
1913 */
1914 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1915 PDMR3DmaRun(pVM);
1916
1917 /*
1918 * EMT Rendezvous (make sure they are handled before the requests).
1919 */
1920 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1921 {
1922 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1923 UPDATE_RC();
1924 /** @todo HACK ALERT! The following test is to make sure EM+TM
1925 * thinks the VM is stopped/reset before the next VM state change
1926 * is made. We need a better solution for this, or at least make it
1927 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1928 * VINF_EM_SUSPEND). */
1929 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1930 {
1931 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1932 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1933 return rc;
1934 }
1935 }
1936
1937 /*
1938 * Requests from other threads.
1939 */
1940 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1941 {
1942 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1943 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1944 {
1945 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1946 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1947 return rc2;
1948 }
1949 UPDATE_RC();
1950 /** @todo HACK ALERT! The following test is to make sure EM+TM
1951 * thinks the VM is stopped/reset before the next VM state change
1952 * is made. We need a better solution for this, or at least make it
1953 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1954 * VINF_EM_SUSPEND). */
1955 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1956 {
1957 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1958 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1959 return rc;
1960 }
1961 }
1962
1963#ifdef VBOX_WITH_REM
1964 /* Replay the handler notification changes. */
1965 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1966 {
1967 /* Try not to cause deadlocks. */
1968 if ( pVM->cCpus == 1
1969 || ( !PGMIsLockOwner(pVM)
1970 && !IOMIsLockWriteOwner(pVM))
1971 )
1972 {
1973 EMRemLock(pVM);
1974 REMR3ReplayHandlerNotifications(pVM);
1975 EMRemUnlock(pVM);
1976 }
1977 }
1978#endif
1979
1980 /* check that we got them all */
1981 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1982 }
1983
1984 /*
1985 * Normal priority then. (per-VCPU)
1986 * (Executed in no particular order.)
1987 */
1988 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1989 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1990 {
1991 /*
1992 * Requests from other threads.
1993 */
1994 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
1995 {
1996 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1997 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1998 {
1999 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2000 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2001 return rc2;
2002 }
2003 UPDATE_RC();
2004 /** @todo HACK ALERT! The following test is to make sure EM+TM
2005 * thinks the VM is stopped/reset before the next VM state change
2006 * is made. We need a better solution for this, or at least make it
2007 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2008 * VINF_EM_SUSPEND). */
2009 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2010 {
2011 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2012 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2013 return rc;
2014 }
2015 }
2016
2017 /* check that we got them all */
2018 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
2019 }
2020
2021 /*
2022 * High priority pre execution chunk last.
2023 * (Executed in ascending priority order.)
2024 */
2025 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
2026 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
2027 {
2028 /*
2029 * Timers before interrupts.
2030 */
2031 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
2032 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2033 TMR3TimerQueuesDo(pVM);
2034
2035 /*
2036 * Pick up asynchronously posted interrupts into the APIC.
2037 */
2038 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2039 APICUpdatePendingInterrupts(pVCpu);
2040
2041 /*
2042 * The instruction following an emulated STI should *always* be executed!
2043 *
2044 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
2045 * the eip is the same as the inhibited instr address. Before we
2046 * are able to execute this instruction in raw mode (iret to
2047 * guest code) an external interrupt might force a world switch
2048 * again. Possibly allowing a guest interrupt to be dispatched
2049 * in the process. This could break the guest. Sounds very
2050 * unlikely, but such timing sensitive problems are not as rare as
2051 * you might think.
2052 */
2053 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2054 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2055 {
2056 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
2057 {
2058 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
2059 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2060 }
2061 else
2062 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
2063 }
2064
2065 /*
2066 * Interrupts.
2067 */
2068 bool fWakeupPending = false;
2069 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2070 && (!rc || rc >= VINF_EM_RESCHEDULE_HM))
2071 {
2072 if ( !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2073 && !TRPMHasTrap(pVCpu)) /* an interrupt could already be scheduled for dispatching in the recompiler. */
2074 {
2075 Assert(!HMR3IsEventPending(pVCpu));
2076 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2077#ifdef VBOX_WITH_NESTED_HWVIRT
2078 if (CPUMIsGuestInNestedHwVirtMode(pCtx))
2079 {
2080 bool fResched, fInject;
2081 rc2 = emR3NstGstInjectIntr(pVCpu, pCtx, &fResched, &fInject);
2082 if (fInject)
2083 {
2084 fWakeupPending = true;
2085#ifdef VBOX_STRICT
2086 rcIrq = rc2;
2087#endif
2088 }
2089 if (fResched)
2090 UPDATE_RC();
2091 }
2092 else
2093#endif
2094 {
2095 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2096#ifdef VBOX_WITH_NESTED_HWVIRT
2097 && pCtx->hwvirt.fGif
2098#endif
2099#ifdef VBOX_WITH_RAW_MODE
2100 && !PATMIsPatchGCAddr(pVM, pCtx->eip)
2101#endif
2102 && pCtx->eflags.Bits.u1IF)
2103 {
2104 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2105 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
2106 /** @todo this really isn't nice, should properly handle this */
2107 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
2108 if (pVM->em.s.fIemExecutesAll && ( rc2 == VINF_EM_RESCHEDULE_REM
2109 || rc2 == VINF_EM_RESCHEDULE_HM
2110 || rc2 == VINF_EM_RESCHEDULE_RAW))
2111 {
2112 rc2 = VINF_EM_RESCHEDULE;
2113 }
2114#ifdef VBOX_STRICT
2115 rcIrq = rc2;
2116#endif
2117 UPDATE_RC();
2118 /* Reschedule required: We must not miss the wakeup below! */
2119 fWakeupPending = true;
2120 }
2121 }
2122 }
2123 }
2124
2125 /*
2126 * Allocate handy pages.
2127 */
2128 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2129 {
2130 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2131 UPDATE_RC();
2132 }
2133
2134 /*
2135 * Debugger Facility request.
2136 */
2137 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2138 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2139 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
2140 {
2141 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2142 UPDATE_RC();
2143 }
2144
2145 /*
2146 * EMT Rendezvous (must be serviced before termination).
2147 */
2148 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2149 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2150 {
2151 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2152 UPDATE_RC();
2153 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2154 * stopped/reset before the next VM state change is made. We need a better
2155 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2156 * && rc <= VINF_EM_SUSPEND). */
2157 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2158 {
2159 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2160 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2161 return rc;
2162 }
2163 }
2164
2165 /*
2166 * State change request (cleared by vmR3SetStateLocked).
2167 */
2168 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2169 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2170 {
2171 VMSTATE enmState = VMR3GetState(pVM);
2172 switch (enmState)
2173 {
2174 case VMSTATE_FATAL_ERROR:
2175 case VMSTATE_FATAL_ERROR_LS:
2176 case VMSTATE_GURU_MEDITATION:
2177 case VMSTATE_GURU_MEDITATION_LS:
2178 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2179 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2180 return VINF_EM_SUSPEND;
2181
2182 case VMSTATE_DESTROYING:
2183 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2184 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2185 return VINF_EM_TERMINATE;
2186
2187 default:
2188 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2189 }
2190 }
2191
2192 /*
2193 * Out of memory? Since most of our fellow high priority actions may cause us
2194 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2195 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2196 * than us since we can terminate without allocating more memory.
2197 */
2198 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2199 {
2200 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2201 UPDATE_RC();
2202 if (rc == VINF_EM_NO_MEMORY)
2203 return rc;
2204 }
2205
2206 /*
2207 * If the virtual sync clock is still stopped, make TM restart it.
2208 */
2209 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2210 TMR3VirtualSyncFF(pVM, pVCpu);
2211
2212#ifdef DEBUG
2213 /*
2214 * Debug, pause the VM.
2215 */
2216 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2217 {
2218 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2219 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2220 return VINF_EM_SUSPEND;
2221 }
2222#endif
2223
2224 /* check that we got them all */
2225 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2226 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2227 }
2228
2229#undef UPDATE_RC
2230 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2231 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2232 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2233 return rc;
2234}
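
/* Informal note on the ordering above (a digest, not additional logic):
   emR3ForcedActions services the normal-priority post-execution chunk first
   (rendezvous, VM state changes, debugger, postponed resets, memory), then
   normal-priority VM-wide and per-VCPU requests, and finally the
   high-priority pre-execution chunk (timers, APIC updates, interrupt
   injection, handy pages), so that timers and interrupts are handled as
   close to resuming guest execution as possible. */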
2235
2236
2237/**
2238 * Check if the preset execution time cap restricts guest execution scheduling.
2239 *
2240 * @returns true if allowed, false otherwise
2241 * @param pVM The cross context VM structure.
2242 * @param pVCpu The cross context virtual CPU structure.
2243 */
2244bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2245{
2246 uint64_t u64UserTime, u64KernelTime;
2247
2248 if ( pVM->uCpuExecutionCap != 100
2249 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2250 {
2251 uint64_t u64TimeNow = RTTimeMilliTS();
2252 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2253 {
2254 /* New time slice. */
2255 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2256 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2257 pVCpu->em.s.u64TimeSliceExec = 0;
2258 }
2259 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2260
2261 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2262 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2263 return false;
2264 }
2265 return true;
2266}
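
/* Worked example (assuming EM_TIME_SLICE is 100 ms): with uCpuExecutionCap=50
   the EMT may accumulate at most 100 * 50 / 100 = 50 ms of kernel+user CPU
   time per slice. Once u64TimeSliceExec reaches that budget this function
   returns false and the execution loops (e.g. emR3RemExecute) sleep briefly
   until a new time slice begins. */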
2267
2268
2269/**
2270 * Execute VM.
2271 *
2272 * This function is the main loop of the VM. The emulation thread
2273 * calls this function when the VM has been successfully constructed
2274 * and we're ready for executing the VM.
2275 *
2276 * Returning from this function means that the VM is turned off or
2277 * suspended (state already saved) and deconstruction is next in line.
2278 *
2279 * All interaction from other threads is done using forced actions
2280 * and signaling of the wait object.
2281 *
2282 * @returns VBox status code, informational status codes may indicate failure.
2283 * @param pVM The cross context VM structure.
2284 * @param pVCpu The cross context virtual CPU structure.
2285 */
2286VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2287{
2288 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2289 pVM,
2290 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2291 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2292 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2293 pVCpu->em.s.fForceRAW));
2294 VM_ASSERT_EMT(pVM);
2295 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2296 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2297 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2298 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2299
2300 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2301 if (rc == 0)
2302 {
2303 /*
2304 * Start the virtual time.
2305 */
2306 TMR3NotifyResume(pVM, pVCpu);
2307
2308 /*
2309 * The Outer Main Loop.
2310 */
2311 bool fFFDone = false;
2312
2313 /* Reschedule right away to start in the right state. */
2314 rc = VINF_SUCCESS;
2315
2316 /* If resuming after a pause or a state load, restore the previous
2317 state, or else we'd start executing code. Otherwise, just reschedule. */
2318 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2319 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2320 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2321 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2322 else
2323 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2324 pVCpu->em.s.cIemThenRemInstructions = 0;
2325 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2326
2327 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2328 for (;;)
2329 {
2330 /*
2331 * Before we can schedule anything (we're here because
2332 * scheduling is required) we must service any pending
2333 * forced actions to avoid any pending action causing
2334 * immediate rescheduling upon entering an inner loop.
2335 *
2336 * Do forced actions.
2337 */
2338 if ( !fFFDone
2339 && RT_SUCCESS(rc)
2340 && rc != VINF_EM_TERMINATE
2341 && rc != VINF_EM_OFF
2342 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2343 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2344 {
2345 rc = emR3ForcedActions(pVM, pVCpu, rc);
2346 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2347 if ( ( rc == VINF_EM_RESCHEDULE_REM
2348 || rc == VINF_EM_RESCHEDULE_HM)
2349 && pVCpu->em.s.fForceRAW)
2350 rc = VINF_EM_RESCHEDULE_RAW;
2351 }
2352 else if (fFFDone)
2353 fFFDone = false;
2354
2355 /*
2356 * Now what to do?
2357 */
2358 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2359 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2360 switch (rc)
2361 {
2362 /*
2363 * Keep doing what we're currently doing.
2364 */
2365 case VINF_SUCCESS:
2366 break;
2367
2368 /*
2369 * Reschedule - to raw-mode execution.
2370 */
2371/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2372 case VINF_EM_RESCHEDULE_RAW:
2373 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2374 if (VM_IS_RAW_MODE_ENABLED(pVM))
2375 {
2376 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2377 pVCpu->em.s.enmState = EMSTATE_RAW;
2378 }
2379 else
2380 {
2381 AssertLogRelFailed();
2382 pVCpu->em.s.enmState = EMSTATE_NONE;
2383 }
2384 break;
2385
2386 /*
2387 * Reschedule - to HM or NEM.
2388 */
2389 case VINF_EM_RESCHEDULE_HM:
2390 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2391 Assert(!pVCpu->em.s.fForceRAW);
2392 if (VM_IS_HM_ENABLED(pVM))
2393 {
2394 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2395 pVCpu->em.s.enmState = EMSTATE_HM;
2396 }
2397 else if (VM_IS_NEM_ENABLED(pVM))
2398 {
2399 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2400 pVCpu->em.s.enmState = EMSTATE_NEM;
2401 }
2402 else
2403 {
2404 AssertLogRelFailed();
2405 pVCpu->em.s.enmState = EMSTATE_NONE;
2406 }
2407 break;
2408
2409 /*
2410 * Reschedule - to recompiled execution.
2411 */
2412 case VINF_EM_RESCHEDULE_REM:
2413 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2414 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2415 {
2416 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2417 enmOldState, EMSTATE_IEM_THEN_REM));
2418 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2419 {
2420 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2421 pVCpu->em.s.cIemThenRemInstructions = 0;
2422 }
2423 }
2424 else
2425 {
2426 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2427 pVCpu->em.s.enmState = EMSTATE_REM;
2428 }
2429 break;
2430
2431 /*
2432 * Resume.
2433 */
2434 case VINF_EM_RESUME:
2435 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2436 /* Don't reschedule in the halted or wait for SIPI case. */
2437 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2438 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2439 {
2440 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2441 break;
2442 }
2443 /* fall through and get scheduled. */
2444 RT_FALL_THRU();
2445
2446 /*
2447 * Reschedule.
2448 */
2449 case VINF_EM_RESCHEDULE:
2450 {
2451 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2452 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2453 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2454 pVCpu->em.s.cIemThenRemInstructions = 0;
2455 pVCpu->em.s.enmState = enmState;
2456 break;
2457 }
2458
2459 /*
2460 * Halted.
2461 */
2462 case VINF_EM_HALT:
2463 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2464 pVCpu->em.s.enmState = EMSTATE_HALTED;
2465 break;
2466
2467 /*
2468 * Switch to the wait for SIPI state (application processor only)
2469 */
2470 case VINF_EM_WAIT_SIPI:
2471 Assert(pVCpu->idCpu != 0);
2472 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2473 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2474 break;
2475
2476
2477 /*
2478 * Suspend.
2479 */
2480 case VINF_EM_SUSPEND:
2481 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2482 Assert(enmOldState != EMSTATE_SUSPENDED);
2483 pVCpu->em.s.enmPrevState = enmOldState;
2484 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2485 break;
2486
2487 /*
2488 * Reset.
2489 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2490 */
2491 case VINF_EM_RESET:
2492 {
2493 if (pVCpu->idCpu == 0)
2494 {
2495 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2496 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2497 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2498 pVCpu->em.s.cIemThenRemInstructions = 0;
2499 pVCpu->em.s.enmState = enmState;
2500 }
2501 else
2502 {
2503 /* All other VCPUs go into the wait for SIPI state. */
2504 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2505 }
2506 break;
2507 }
2508
2509 /*
2510 * Power Off.
2511 */
2512 case VINF_EM_OFF:
2513 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2514 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2515 TMR3NotifySuspend(pVM, pVCpu);
2516 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2517 return rc;
2518
2519 /*
2520 * Terminate the VM.
2521 */
2522 case VINF_EM_TERMINATE:
2523 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2524 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2525 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2526 TMR3NotifySuspend(pVM, pVCpu);
2527 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2528 return rc;
2529
2530
2531 /*
2532 * Out of memory, suspend the VM and stuff.
2533 */
2534 case VINF_EM_NO_MEMORY:
2535 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2536 Assert(enmOldState != EMSTATE_SUSPENDED);
2537 pVCpu->em.s.enmPrevState = enmOldState;
2538 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2539 TMR3NotifySuspend(pVM, pVCpu);
2540 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2541
2542 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2543 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2544 if (rc != VINF_EM_SUSPEND)
2545 {
2546 if (RT_SUCCESS_NP(rc))
2547 {
2548 AssertLogRelMsgFailed(("%Rrc\n", rc));
2549 rc = VERR_EM_INTERNAL_ERROR;
2550 }
2551 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2552 }
2553 return rc;
2554
2555 /*
2556 * Guest debug events.
2557 */
2558 case VINF_EM_DBG_STEPPED:
2559 case VINF_EM_DBG_STOP:
2560 case VINF_EM_DBG_EVENT:
2561 case VINF_EM_DBG_BREAKPOINT:
2562 case VINF_EM_DBG_STEP:
2563 if (enmOldState == EMSTATE_RAW)
2564 {
2565 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2566 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2567 }
2568 else if (enmOldState == EMSTATE_HM)
2569 {
2570 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2571 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2572 }
2573 else if (enmOldState == EMSTATE_NEM)
2574 {
2575 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2576 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2577 }
2578 else if (enmOldState == EMSTATE_REM)
2579 {
2580 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2581 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2582 }
2583 else
2584 {
2585 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2586 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2587 }
2588 break;
2589
2590 /*
2591 * Hypervisor debug events.
2592 */
2593 case VINF_EM_DBG_HYPER_STEPPED:
2594 case VINF_EM_DBG_HYPER_BREAKPOINT:
2595 case VINF_EM_DBG_HYPER_ASSERTION:
2596 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2597 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2598 break;
2599
2600 /*
2601 * Triple fault.
2602 */
2603 case VINF_EM_TRIPLE_FAULT:
2604 if (!pVM->em.s.fGuruOnTripleFault)
2605 {
2606 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2607 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2608 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2609 continue;
2610 }
2611 /* Else fall through and trigger a guru. */
2612 RT_FALL_THRU();
2613
2614 case VERR_VMM_RING0_ASSERTION:
2615 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2616 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2617 break;
2618
2619 /*
2620 * Any error code showing up here other than the ones we
2621 * know and process above is considered to be FATAL.
2622 *
2623 * Unknown warnings and informational status codes are also
2624 * included in this.
2625 */
2626 default:
2627 if (RT_SUCCESS_NP(rc))
2628 {
2629 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2630 rc = VERR_EM_INTERNAL_ERROR;
2631 }
2632 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2633 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2634 break;
2635 }
2636
2637 /*
2638 * Act on state transition.
2639 */
2640 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2641 if (enmOldState != enmNewState)
2642 {
2643 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2644
2645 /* Clear MWait flags and the unhalt FF. */
2646 if ( enmOldState == EMSTATE_HALTED
2647 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2648 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2649 && ( enmNewState == EMSTATE_RAW
2650 || enmNewState == EMSTATE_HM
2651 || enmNewState == EMSTATE_NEM
2652 || enmNewState == EMSTATE_REM
2653 || enmNewState == EMSTATE_IEM_THEN_REM
2654 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2655 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2656 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2657 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2658 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2659 {
2660 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2661 {
2662 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2663 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2664 }
2665 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2666 {
2667 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2668 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2669 }
2670 }
2671 }
2672 else
2673 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2674
2675 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2676 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2677
2678 /*
2679 * Act on the new state.
2680 */
2681 switch (enmNewState)
2682 {
2683 /*
2684 * Execute raw.
2685 */
2686 case EMSTATE_RAW:
2687#ifdef VBOX_WITH_RAW_MODE
2688 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2689#else
2690 AssertLogRelMsgFailed(("%Rrc\n", rc));
2691 rc = VERR_EM_INTERNAL_ERROR;
2692#endif
2693 break;
2694
2695 /*
2696 * Execute hardware accelerated raw.
2697 */
2698 case EMSTATE_HM:
2699 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2700 break;
2701
2702 /*
2703 * Execute using the native execution manager (NEM).
2704 */
2705 case EMSTATE_NEM:
2706 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2707 break;
2708
2709 /*
2710 * Execute recompiled.
2711 */
2712 case EMSTATE_REM:
2713 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2714 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2715 break;
2716
2717 /*
2718 * Execute in the interpreter.
2719 */
2720 case EMSTATE_IEM:
2721 {
2722#if 0 /* For testing purposes. */
2723 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2724 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2725 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2726 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2727 rc = VINF_SUCCESS;
2728 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2729#endif
2730 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
2731 if (pVM->em.s.fIemExecutesAll)
2732 {
2733 Assert(rc != VINF_EM_RESCHEDULE_REM);
2734 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2735 Assert(rc != VINF_EM_RESCHEDULE_HM);
2736 }
2737 fFFDone = false;
2738 break;
2739 }
2740
2741 /*
2742 * Execute in IEM, hoping we can quickly switch back to HM
2743 * or RAW execution. If our hopes fail, we go to REM.
2744 */
2745 case EMSTATE_IEM_THEN_REM:
2746 {
2747 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2748 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2749 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2750 break;
2751 }
2752
2753 /*
2754 * Application processor execution halted until SIPI.
2755 */
2756 case EMSTATE_WAIT_SIPI:
2757 /* no break */
2758 /*
2759 * hlt - execution halted until interrupt.
2760 */
2761 case EMSTATE_HALTED:
2762 {
2763 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2764 /* If HM (or someone else) stores a pending interrupt in
2765 TRPM, it must be dispatched ASAP without any halting.
2766 Anything pending in TRPM has been accepted and the CPU
2767 should already be in the right state to receive it. */
2768 if (TRPMHasTrap(pVCpu))
2769 rc = VINF_EM_RESCHEDULE;
2770 /* MWAIT has a special extension where it's woken up when
2771 an interrupt is pending even when IF=0. */
2772 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2773 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2774 {
2775 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2776 if (rc == VINF_SUCCESS)
2777 {
2778 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2779 APICUpdatePendingInterrupts(pVCpu);
2780
2781 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2782 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2783 {
2784 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2785 rc = VINF_EM_RESCHEDULE;
2786 }
2787 }
2788 }
2789 else
2790 {
2791 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2792 /* We're only interested in NMIs/SMIs here, which have their own FFs, so we don't
2793 need to check VMCPU_FF_UPDATE_APIC here. */
2794 if ( rc == VINF_SUCCESS
2795 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2796 {
2797 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2798 rc = VINF_EM_RESCHEDULE;
2799 }
2800 }
2801
2802 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2803 break;
2804 }

                /*
                 * Suspended - return to VM.cpp.
                 */
                case EMSTATE_SUSPENDED:
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return VINF_EM_SUSPEND;

                /*
                 * Debugging in the guest.
                 */
                case EMSTATE_DEBUG_GUEST_RAW:
                case EMSTATE_DEBUG_GUEST_HM:
                case EMSTATE_DEBUG_GUEST_NEM:
                case EMSTATE_DEBUG_GUEST_IEM:
                case EMSTATE_DEBUG_GUEST_REM:
                    TMR3NotifySuspend(pVM, pVCpu);
                    rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
                    TMR3NotifyResume(pVM, pVCpu);
                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
                    break;

                /*
                 * Debugging in the hypervisor.
                 */
                case EMSTATE_DEBUG_HYPER:
                {
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);

                    rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
                    if (rc != VINF_SUCCESS)
                    {
                        if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
                            pVCpu->em.s.enmState = EMSTATE_TERMINATING;
                        else
                        {
                            /* switch to guru meditation mode */
                            pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
                            VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
                            VMMR3FatalDump(pVM, pVCpu, rc);
                        }
                        Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                        return rc;
                    }

                    STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
                    TMR3NotifyResume(pVM, pVCpu);
                    break;
                }

                /*
                 * Guru meditation takes place in the debugger.
                 */
                case EMSTATE_GURU_MEDITATION:
                {
                    TMR3NotifySuspend(pVM, pVCpu);
                    VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
                    VMMR3FatalDump(pVM, pVCpu, rc);
                    emR3Debug(pVM, pVCpu, rc);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return rc;
                }
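                /* Usage note (illustrative; the command is assumed from the
                   VBoxManage debugvm interface and is not referenced by this
                   file): after a guru meditation, the fatal dump produced by
                   VMMR3FatalDump above ends up in the release log, and a core
                   file for offline inspection can typically be captured with:

                       VBoxManage debugvm <vmname|uuid> dumpvmcore --filename guru.core
                 */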

                /*
                 * The states we don't expect here.
                 */
                case EMSTATE_NONE:
                case EMSTATE_TERMINATING:
                default:
                    AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
                    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return VERR_EM_INTERNAL_ERROR;
            }
        } /* The Outer Main Loop */
    }
    else
    {
        /*
         * Fatal error.
         */
        Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
        TMR3NotifySuspend(pVM, pVCpu);
        VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
        VMMR3FatalDump(pVM, pVCpu, rc);
        emR3Debug(pVM, pVCpu, rc);
        STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
        /** @todo change the VM state! */
        return rc;
    }

    /* not reached */
}

/**
 * Notify EM that the VM is being suspended (used by FTM).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);

    TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
    pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
    pVCpu->em.s.enmState     = EMSTATE_SUSPENDED;
    return VINF_SUCCESS;
}

/**
 * Notify EM that the VM is being resumed (used by FTM).
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
{
    PVMCPU  pVCpu       = VMMGetCpu(pVM);
    EMSTATE enmCurState = pVCpu->em.s.enmState;

    TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
    pVCpu->em.s.enmState     = pVCpu->em.s.enmPrevState;
    pVCpu->em.s.enmPrevState = enmCurState;
    return VINF_SUCCESS;
}
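
/* Illustrative usage sketch (hypothetical caller; ftmR3DoCriticalWork is an
 * assumed name, not an actual FTM function).  The two notifications above are
 * meant to bracket work during which virtual time must not advance and EM has
 * to sit in EMSTATE_SUSPENDED:
 *
 *     EMR3NotifySuspend(pVM);      // park EM, stop the TM virtual clocks
 *     ftmR3DoCriticalWork(pVM);    // hypothetical state-sync critical section
 *     EMR3NotifyResume(pVM);       // restore previous EM state, restart clocks
 */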