VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@72565

Last change on this file since 72565 was 72565, checked in by vboxsync, 7 years ago

EM,TRPM: Record TSC with exit history in raw-mode. Added the exit history counter to the statistic (/PROF/CPUx/EM/RecordedExits). bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 139.3 KB
/* $Id: EM.cpp 72565 2018-06-15 13:30:01Z vboxsync $ */
/** @file
 * EM - Execution Monitor / Manager.
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/** @page pg_em EM - The Execution Monitor / Manager
 *
 * The Execution Monitor/Manager is responsible for running the VM, scheduling
 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
 * Interpreted), and keeping the CPU states in sync. The function
 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
 * emR3RemExecute).
 *
 * The interpreted execution is only used to avoid switching between
 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
 * The interpretation is thus implemented as part of EM.
 *
 * @see grp_em
 */
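/* Illustrative sketch only, not part of the source: the main loop described
 * above effectively does
 *      for (;;)
 *      {
 *          pick an execution mode (raw-mode / HM / NEM / IEM / REM);
 *          run that mode's inner loop until it returns a status code;
 *          service forced actions and act on the status code;
 *      }
 * with the real dispatching living in EMR3ExecuteVM().
 */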


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
#include <VBox/vmm/em.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/patm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/nem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include <VBox/vmm/apic.h>
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmqueue.h>
#include <VBox/vmm/hm.h>
#include <VBox/vmm/patm.h>
#include "EMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/uvm.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include "VMMTracing.h"

#include <iprt/asm.h>
#include <iprt/string.h>
#include <iprt/stream.h>
#include <iprt/thread.h>


/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
#define EM_NOTIFY_HM
#endif


/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
static const char *emR3GetStateName(EMSTATE enmState);
#endif
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
#if defined(VBOX_WITH_REM) || defined(DEBUG)
static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
#endif
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);


/**
 * Initializes the EM.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
{
    LogFlow(("EMR3Init\n"));
    /*
     * Assert alignment and sizes.
     */
    AssertCompileMemberAlignment(VM, em.s, 32);
    AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
    AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));

    /*
     * Init the structure.
     */
    pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
    PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
    PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
    bool fEnabled;
    int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileUser = !fEnabled;

    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
    AssertLogRelRCReturn(rc, rc);
    pVM->fRecompileSupervisor = !fEnabled;

#ifdef VBOX_WITH_RAW_RING1
    rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
    AssertLogRelRCReturn(rc, rc);
#else
    pVM->fRawRing1Enabled = false; /* Disabled by default. */
#endif

    rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
    AssertLogRelRCReturn(rc, rc);

    rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
    AssertLogRelRCReturn(rc, rc);
    pVM->em.s.fGuruOnTripleFault = !fEnabled;
    if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
    {
        LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
        pVM->em.s.fGuruOnTripleFault = true;
    }

    LogRel(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
            pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));

#ifdef VBOX_WITH_REM
    /*
     * Initialize the REM critical section.
     */
    AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
    rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
    AssertRCReturn(rc, rc);
#endif

    /*
     * Saved state.
     */
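    /* Note: only the save and load execute callbacks are used below; the 16
       is the per-VM data size guess handed to SSM (an assumption based on
       the SSMR3RegisterInternal parameter order, not verified here). */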
    rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
                               NULL, NULL, NULL,
                               NULL, emR3Save, NULL,
                               NULL, emR3Load, NULL);
    if (RT_FAILURE(rc))
        return rc;

    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        pVCpu->em.s.enmState     = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
        pVCpu->em.s.enmPrevState = EMSTATE_NONE;
        pVCpu->em.s.fForceRAW    = false;

        pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
#ifdef VBOX_WITH_RAW_MODE
        if (VM_IS_RAW_MODE_ENABLED(pVM))
        {
            pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
            AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
        }
#endif

        /* Force reset of the time slice. */
        pVCpu->em.s.u64TimeSliceStart = 0;
# define EM_REG_COUNTER(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_COUNTER_USED(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

# define EM_REG_PROFILE_ADV(a, b, c) \
        rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
        AssertRC(rc);

        /*
         * Statistics.
         */
#ifdef VBOX_WITH_STATISTICS
        PEMSTATS pStats;
        rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
        if (RT_FAILURE(rc))
            return rc;

        pVCpu->em.s.pStatsR3 = pStats;
        pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
        pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);

        EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
        EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");

        EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
        EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");

        EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
        EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");

        EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
        EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of instruction prefixes.");
        EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of instruction prefixes.");

        EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
        EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
        EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
        EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
        EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
        EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");

        EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
        pVCpu->em.s.pCliStatTree = 0;
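        /* The per-address cli statistics tree starts out empty; it is only
           populated at runtime (raw-mode statistics). */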

        /* these should be considered for release statistics. */
        EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
        EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%d/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%d/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%d/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
        EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%d/EM/NEMEnter", "Profiling NEM entry overhead.");
#endif /* VBOX_WITH_STATISTICS */
        EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%d/EM/NEMExec", "Profiling NEM execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%d/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
#ifdef VBOX_WITH_STATISTICS
        EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
        EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
#endif /* VBOX_WITH_STATISTICS */

        EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
        EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
        EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
        EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");

        EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");

        rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
                             "Number of recorded exits (R0/RC).", "/PROF/CPU%u/EM/RecordedExits", i);
        AssertRC(rc);

    }

    emR3InitDbg(pVM);
    return VINF_SUCCESS;
}



/**
 * Applies relocations to data and code managed by this
 * component. This function will be called at init and
 * whenever the VMM needs to relocate itself inside the GC.
 *
 * @param   pVM         The cross context VM structure.
 */
VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
{
    LogFlow(("EMR3Relocate\n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        if (pVCpu->em.s.pStatsR3)
            pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
    }
}


/**
 * Reset the EM state for a CPU.
 *
 * Called by EMR3Reset and hot plugging.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 */
VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
{
    /* Reset scheduling state. */
    pVCpu->em.s.fForceRAW = false;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);

    /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
       out of the HALTED state here so that enmPrevState doesn't end up as
       HALTED when EMR3Execute returns. */
    if (pVCpu->em.s.enmState == EMSTATE_HALTED)
    {
        Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
        pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
    }
}


/**
 * Reset notification.
 *
 * @param   pVM         The cross context VM structure.
 */
VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
{
    Log(("EMR3Reset: \n"));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
        EMR3ResetCpu(&pVM->aCpus[i]);
}

/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM itself is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
{
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

#ifdef VBOX_WITH_REM
    PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
#else
    RT_NOREF(pVM);
#endif
    return VINF_SUCCESS;
}


/**
 * Execute state save operation.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pSSM    SSM operation handle.
 */
static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);

        Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
        Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
        SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);

        /* Save mwait state. */
        SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
        SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
        SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
        SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
        SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
        int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
        AssertRCReturn(rc, rc);
    }
    return VINF_SUCCESS;
}


/**
 * Execute state load operation.
 *
 * @returns VBox status code.
 * @param   pVM         The cross context VM structure.
 * @param   pSSM        SSM operation handle.
 * @param   uVersion    Data layout version.
 * @param   uPass       The data pass.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    /*
     * Validate version.
     */
    if (   uVersion > EM_SAVED_STATE_VERSION
        || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
    {
        AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Load the saved state.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;
        AssertRCReturn(rc, rc);

        if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
        {
            AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
            rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
            AssertRCReturn(rc, rc);
            Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);

            pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
        }
        if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
        {
            /* Load mwait state. */
            rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
            AssertRCReturn(rc, rc);
        }

        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return VINF_SUCCESS;
}


/**
 * Argument packet for emR3SetExecutionPolicy.
 */
struct EMR3SETEXECPOLICYARGS
{
    EMEXECPOLICY    enmPolicy;
    bool            fEnforce;
};


/**
 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
 */
static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
{
    /*
     * Only the first CPU changes the variables.
     */
    if (pVCpu->idCpu == 0)
    {
        struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
        switch (pArgs->enmPolicy)
        {
            case EMEXECPOLICY_RECOMPILE_RING0:
                pVM->fRecompileSupervisor = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_RECOMPILE_RING3:
                pVM->fRecompileUser = pArgs->fEnforce;
                break;
            case EMEXECPOLICY_IEM_ALL:
                pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
                break;
            default:
                AssertFailedReturn(VERR_INVALID_PARAMETER);
        }
        LogRel(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
                pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
    }

    /*
     * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
     */
    return pVCpu->em.s.enmState == EMSTATE_RAW
        || pVCpu->em.s.enmState == EMSTATE_HM
        || pVCpu->em.s.enmState == EMSTATE_NEM
        || pVCpu->em.s.enmState == EMSTATE_IEM
        || pVCpu->em.s.enmState == EMSTATE_REM
        || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
         ? VINF_EM_RESCHEDULE
         : VINF_SUCCESS;
}


/**
 * Changes an execution scheduling policy parameter.
 *
 * This is used to enable or disable raw-mode / hardware-virtualization
 * execution of user and supervisor code.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
 *
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to change.
 * @param   fEnforce    Whether to enforce the policy or not.
 */
VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
    struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
    return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
}


/**
 * Queries an execution scheduling policy parameter.
 *
 * @returns VBox status code
 * @param   pUVM        The user mode VM handle.
 * @param   enmPolicy   The scheduling policy to query.
 * @param   pfEnforced  Where to return the current value.
 */
VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
{
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    /* No need to bother EMTs with a query. */
    switch (enmPolicy)
    {
        case EMEXECPOLICY_RECOMPILE_RING0:
            *pfEnforced = pVM->fRecompileSupervisor;
            break;
        case EMEXECPOLICY_RECOMPILE_RING3:
            *pfEnforced = pVM->fRecompileUser;
            break;
        case EMEXECPOLICY_IEM_ALL:
            *pfEnforced = pVM->em.s.fIemExecutesAll;
            break;
        default:
            AssertFailedReturn(VERR_INTERNAL_ERROR_2);
    }

    return VINF_SUCCESS;
}


/**
 * Queries the main execution engine of the VM.
 *
 * @returns VBox status code
 * @param   pUVM                    The user mode VM handle.
 * @param   pbMainExecutionEngine   Where to return the result, VM_EXEC_ENGINE_XXX.
 */
VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
{
    AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
    *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;

    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    PVM pVM = pUVM->pVM;
    VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);

    *pbMainExecutionEngine = pVM->bMainExecutionEngine;
    return VINF_SUCCESS;
}


/**
 * Raise a fatal error.
 *
 * Safely terminate the VM with full state report and stuff. This function
 * will naturally never return.
 *
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   rc          VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
}


#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
/**
 * Gets the EM state name.
 *
 * @returns Pointer to a read-only state name.
 * @param   enmState    The state.
 */
static const char *emR3GetStateName(EMSTATE enmState)
{
    switch (enmState)
    {
        case EMSTATE_NONE:              return "EMSTATE_NONE";
        case EMSTATE_RAW:               return "EMSTATE_RAW";
        case EMSTATE_HM:                return "EMSTATE_HM";
        case EMSTATE_IEM:               return "EMSTATE_IEM";
        case EMSTATE_REM:               return "EMSTATE_REM";
        case EMSTATE_HALTED:            return "EMSTATE_HALTED";
        case EMSTATE_WAIT_SIPI:         return "EMSTATE_WAIT_SIPI";
        case EMSTATE_SUSPENDED:         return "EMSTATE_SUSPENDED";
        case EMSTATE_TERMINATING:       return "EMSTATE_TERMINATING";
        case EMSTATE_DEBUG_GUEST_RAW:   return "EMSTATE_DEBUG_GUEST_RAW";
        case EMSTATE_DEBUG_GUEST_HM:    return "EMSTATE_DEBUG_GUEST_HM";
        case EMSTATE_DEBUG_GUEST_IEM:   return "EMSTATE_DEBUG_GUEST_IEM";
        case EMSTATE_DEBUG_GUEST_REM:   return "EMSTATE_DEBUG_GUEST_REM";
        case EMSTATE_DEBUG_HYPER:       return "EMSTATE_DEBUG_HYPER";
        case EMSTATE_GURU_MEDITATION:   return "EMSTATE_GURU_MEDITATION";
        case EMSTATE_IEM_THEN_REM:      return "EMSTATE_IEM_THEN_REM";
        case EMSTATE_NEM:               return "EMSTATE_NEM";
        case EMSTATE_DEBUG_GUEST_NEM:   return "EMSTATE_DEBUG_GUEST_NEM";
        default:                        return "Unknown!";
    }
}
#endif /* LOG_ENABLED || VBOX_STRICT */


/**
 * Handle pending ring-3 I/O port write.
 *
 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
{
    CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);

    /* Get and clear the pending data. */
    RTIOPORT const uPort   = pVCpu->em.s.PendingIoPortAccess.uPort;
    uint32_t const uValue  = pVCpu->em.s.PendingIoPortAccess.uValue;
    uint8_t const  cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
    uint8_t const  cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
    pVCpu->em.s.PendingIoPortAccess.cbValue = 0;

    /* Assert sanity. */
    switch (cbValue)
    {
        case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break; /* the value must fit the access width */
        case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
        case 4: break;
        default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
    }
    AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);

    /* Do the work. */
    VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
    LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
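    /* On success, advance RIP past the OUT instruction and clear the resume flag. */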
    if (IOM_SUCCESS(rcStrict))
    {
        pVCpu->cpum.GstCtx.rip += cbInstr;
        pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
    }
    return rcStrict;
}

/**
 * Handle pending ring-3 I/O port read.
 *
 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
 *
 * @returns Strict VBox status code.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
{
    CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);

    /* Get and clear the pending data. */
    RTIOPORT const uPort   = pVCpu->em.s.PendingIoPortAccess.uPort;
    uint8_t const  cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
    uint8_t const  cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
    pVCpu->em.s.PendingIoPortAccess.cbValue = 0;

    /* Assert sanity. */
    switch (cbValue)
    {
        case 1: break;
        case 2: break;
        case 4: break;
        default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
    }
    AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ */, VERR_EM_INTERNAL_ERROR);
    AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);

    /* Do the work. */
    uint32_t uValue = 0;
    VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
    LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
    if (IOM_SUCCESS(rcStrict))
    {
        if (cbValue == 4)
            pVCpu->cpum.GstCtx.rax = uValue;
        else if (cbValue == 2)
            pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
        else
            pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
        pVCpu->cpum.GstCtx.rip += cbInstr;
        pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
    }
    return rcStrict;
}


/**
 * Debug loop.
 *
 * @returns VBox status code for EM.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      Current EM VBox status code.
 */
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
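        /* Remember the incoming status; the no-debugger case below falls back to it. */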
        const VBOXSTRICTRC rcLast = rc;

        /*
         * Debug related RC.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (   pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    || pVCpu->em.s.fForceRAW /* paranoia */)
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawStep(pVM, pVCpu);
#else
                    AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
#endif
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
                    rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
                    rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
#ifdef VBOX_WITH_REM
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
                    rc = emR3RemStep(pVM, pVCpu);
#endif
                else
                {
                    rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
                    if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
                        rc = VINF_EM_DBG_STEPPED;
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_EVENT:
                rc = DBGFR3EventHandlePending(pVM, pVCpu);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                RTLogFlush(NULL);
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;
            case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special errors code! */
            {
                LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
            }
        }

        /*
         * Process the result.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Continue the debugging loop.
             */
            case VINF_EM_DBG_STEP:
            case VINF_EM_DBG_STOP:
            case VINF_EM_DBG_EVENT:
            case VINF_EM_DBG_STEPPED:
            case VINF_EM_DBG_BREAKPOINT:
            case VINF_EM_DBG_HYPER_STEPPED:
            case VINF_EM_DBG_HYPER_BREAKPOINT:
            case VINF_EM_DBG_HYPER_ASSERTION:
                break;

            /*
             * Resuming execution (in some form) has to be done here if we got
             * a hypervisor debug event.
             */
            case VINF_SUCCESS:
            case VINF_EM_RESUME:
            case VINF_EM_SUSPEND:
            case VINF_EM_RESCHEDULE:
            case VINF_EM_RESCHEDULE_RAW:
            case VINF_EM_RESCHEDULE_REM:
            case VINF_EM_HALT:
                if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                {
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawResumeHyper(pVM, pVCpu);
                    if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                        continue;
#else
                    AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
#endif
                }
                if (rc == VINF_SUCCESS)
                    rc = VINF_EM_RESCHEDULE;
                return rc;

            /*
             * The debugger isn't attached.
             * We'll simply turn the thing off since that's the easiest thing to do.
             */
            case VERR_DBGF_NOT_ATTACHED:
                switch (VBOXSTRICTRC_VAL(rcLast))
                {
                    case VINF_EM_DBG_HYPER_STEPPED:
                    case VINF_EM_DBG_HYPER_BREAKPOINT:
                    case VINF_EM_DBG_HYPER_ASSERTION:
                    case VERR_TRPM_PANIC:
                    case VERR_TRPM_DONT_PANIC:
                    case VERR_VMM_RING0_ASSERTION:
                    case VERR_VMM_HYPER_CR3_MISMATCH:
                    case VERR_VMM_RING3_CALL_DISABLED:
                        return rcLast;
                }
                return VINF_EM_OFF;

            /*
             * Status codes terminating the VM in one or another sense.
             */
            case VINF_EM_TERMINATE:
            case VINF_EM_OFF:
            case VINF_EM_RESET:
            case VINF_EM_NO_MEMORY:
            case VINF_EM_RAW_STALE_SELECTOR:
            case VINF_EM_RAW_IRET_TRAP:
            case VERR_TRPM_PANIC:
            case VERR_TRPM_DONT_PANIC:
            case VERR_IEM_INSTR_NOT_IMPLEMENTED:
            case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
            case VERR_VMM_RING0_ASSERTION:
            case VERR_VMM_HYPER_CR3_MISMATCH:
            case VERR_VMM_RING3_CALL_DISABLED:
            case VERR_INTERNAL_ERROR:
            case VERR_INTERNAL_ERROR_2:
            case VERR_INTERNAL_ERROR_3:
            case VERR_INTERNAL_ERROR_4:
            case VERR_INTERNAL_ERROR_5:
            case VERR_IPE_UNEXPECTED_STATUS:
            case VERR_IPE_UNEXPECTED_INFO_STATUS:
            case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                return rc;

            /*
             * The rest is unexpected, and will keep us here.
             */
            default:
                AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
                break;
        }
    } /* debug for ever */
}


#if defined(VBOX_WITH_REM) || defined(DEBUG)
/**
 * Steps recompiled code.
 *
 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
 *          VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
    Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

# ifdef VBOX_WITH_REM
    EMRemLock(pVM);

    /*
     * Switch to REM, step instruction, switch back.
     */
    int rc = REMR3State(pVM, pVCpu);
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);
        REMR3StateBack(pVM, pVCpu);
    }
    EMRemUnlock(pVM);

# else
    int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
# endif

    Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
    return rc;
}
#endif /* VBOX_WITH_REM || DEBUG */

1157#ifdef VBOX_WITH_REM
1158/**
1159 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
1160 * critical section.
1161 *
1162 * @returns false - new fInREMState value.
1163 * @param pVM The cross context VM structure.
1164 * @param pVCpu The cross context virtual CPU structure.
1165 */
1166DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1167{
1168 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1169 REMR3StateBack(pVM, pVCpu);
1170 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1171
1172 EMRemUnlock(pVM);
1173 return false;
1174}
1175#endif
1176
1177
1178/**
1179 * Executes recompiled code.
1180 *
1181 * This function contains the recompiler version of the inner
1182 * execution loop (the outer loop being in EMR3ExecuteVM()).
1183 *
1184 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1185 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1186 *
1187 * @param pVM The cross context VM structure.
1188 * @param pVCpu The cross context virtual CPU structure.
1189 * @param pfFFDone Where to store an indicator telling whether or not
1190 * FFs were done before returning.
1191 *
1192 */
1193static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1194{
1195#ifdef LOG_ENABLED
1196 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1197 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1198
1199 if (pCtx->eflags.Bits.u1VM)
1200 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
1201 else
1202 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
1203#endif
1204 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1205
1206#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1207 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1208 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1209 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1210#endif
1211
1212 /*
1213 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1214 * or the REM suggests raw-mode execution.
1215 */
1216 *pfFFDone = false;
1217#ifdef VBOX_WITH_REM
1218 bool fInREMState = false;
1219#else
1220 uint32_t cLoops = 0;
1221#endif
1222 int rc = VINF_SUCCESS;
1223 for (;;)
1224 {
1225#ifdef VBOX_WITH_REM
1226 /*
1227 * Lock REM and update the state if not already in sync.
1228 *
1229 * Note! Big lock, but you are not supposed to own any lock when
1230 * coming in here.
1231 */
1232 if (!fInREMState)
1233 {
1234 EMRemLock(pVM);
1235 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1236
1237 /* Flush the recompiler translation blocks if the VCPU has changed,
1238 also force a full CPU state resync. */
1239 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1240 {
1241 REMFlushTBs(pVM);
1242 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1243 }
1244 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1245
1246 rc = REMR3State(pVM, pVCpu);
1247
1248 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1249 if (RT_FAILURE(rc))
1250 break;
1251 fInREMState = true;
1252
1253 /*
1254 * We might have missed the raising of VMREQ, TIMER and some other
1255 * important FFs while we were busy switching the state. So, check again.
1256 */
1257 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1258 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1259 {
1260 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1261 goto l_REMDoForcedActions;
1262 }
1263 }
1264#endif
1265
1266 /*
1267 * Execute REM.
1268 */
1269 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1270 {
1271 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1272#ifdef VBOX_WITH_REM
1273 rc = REMR3Run(pVM, pVCpu);
1274#else
1275 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
1276#endif
1277 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1278 }
1279 else
1280 {
1281 /* Give up this time slice; virtual time continues */
1282 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1283 RTThreadSleep(5);
1284 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1285 rc = VINF_SUCCESS;
1286 }
1287
1288 /*
1289 * Deal with high priority post execution FFs before doing anything
1290 * else. Sync back the state and leave the lock to be on the safe side.
1291 */
1292 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1293 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1294 {
1295#ifdef VBOX_WITH_REM
1296 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1297#endif
1298 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1299 }
1300
1301 /*
1302 * Process the returned status code.
1303 */
1304 if (rc != VINF_SUCCESS)
1305 {
1306 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1307 break;
1308 if (rc != VINF_REM_INTERRUPED_FF)
1309 {
1310#ifndef VBOX_WITH_REM
1311 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1312 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1313 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1314 {
1315 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1316 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1317 {
1318 rc = VINF_EM_RESCHEDULE;
1319 break;
1320 }
1321 }
1322#endif
1323
1324 /*
1325 * Anything which is not known to us means an internal error
1326 * and the termination of the VM!
1327 */
1328 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1329 break;
1330 }
1331 }
1332
1333
1334 /*
1335 * Check and execute forced actions.
1336 *
1337 * Sync back the VM state and leave the lock before calling any of
1338 * these, you never know what's going to happen here.
1339 */
1340#ifdef VBOX_HIGH_RES_TIMERS_HACK
1341 TMTimerPollVoid(pVM, pVCpu);
1342#endif
1343 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1344 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1345 || VMCPU_FF_IS_PENDING(pVCpu,
1346 VMCPU_FF_ALL_REM_MASK
1347 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1348 {
1349#ifdef VBOX_WITH_REM
1350l_REMDoForcedActions:
1351 if (fInREMState)
1352 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1353#endif
1354 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1355 rc = emR3ForcedActions(pVM, pVCpu, rc);
1356 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1357 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1358 if ( rc != VINF_SUCCESS
1359 && rc != VINF_EM_RESCHEDULE_REM)
1360 {
1361 *pfFFDone = true;
1362 break;
1363 }
1364 }
1365
1366#ifndef VBOX_WITH_REM
1367 /*
1368 * Have to check if we can get back to fast execution mode every so often.
1369 */
1370 if (!(++cLoops & 7))
1371 {
1372 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1373 if ( enmCheck != EMSTATE_REM
1374 && enmCheck != EMSTATE_IEM_THEN_REM)
1375 return VINF_EM_RESCHEDULE;
1376 }
1377#endif
1378
1379 } /* The Inner Loop, recompiled execution mode version. */
1380
1381
1382#ifdef VBOX_WITH_REM
1383 /*
1384 * Returning. Sync back the VM state if required.
1385 */
1386 if (fInREMState)
1387 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1388#endif
1389
1390 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1391 return rc;
1392}
1393
1394
1395#ifdef DEBUG
1396
1397int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1398{
1399 EMSTATE enmOldState = pVCpu->em.s.enmState;
1400
1401 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1402
1403 Log(("Single step BEGIN:\n"));
1404 for (uint32_t i = 0; i < cIterations; i++)
1405 {
1406 DBGFR3PrgStep(pVCpu);
1407 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1408 emR3RemStep(pVM, pVCpu);
1409 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1410 break;
1411 }
1412 Log(("Single step END:\n"));
1413 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1414 pVCpu->em.s.enmState = enmOldState;
1415 return VINF_EM_RESCHEDULE;
1416}
1417
1418#endif /* DEBUG */
1419
1420
1421/**
1422 * Try to execute the problematic code in IEM first, then fall back on REM if there
1423 * is too much of it or if IEM doesn't implement something.
1424 *
1425 * @returns Strict VBox status code from IEMExecLots.
1426 * @param pVM The cross context VM structure.
1427 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1428 * @param pfFFDone Force flags done indicator.
1429 *
1430 * @thread EMT(pVCpu)
1431 */
1432static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1433{
1434 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1435 *pfFFDone = false;
1436
1437 /*
1438 * Execute in IEM for a while.
1439 */
1440 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1441 {
1442 uint32_t cInstructions;
1443 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
1444 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1445 if (rcStrict != VINF_SUCCESS)
1446 {
1447 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1448 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1449 break;
1450
1451 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1452 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1453 return rcStrict;
1454 }
1455
1456 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1457 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1458 {
1459 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1460 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1461 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1462 pVCpu->em.s.enmState = enmNewState;
1463 return VINF_SUCCESS;
1464 }
1465
1466 /*
1467 * Check for pending actions.
1468 */
1469 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1470 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1471 return VINF_SUCCESS;
1472 }
1473
1474 /*
1475 * Switch to REM.
1476 */
1477 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1478 pVCpu->em.s.enmState = EMSTATE_REM;
1479 return VINF_SUCCESS;
1480}
1481
1482
1483/**
1484 * Decides whether to execute RAW, HWACC or REM.
1485 *
1486 * @returns new EM state
1487 * @param pVM The cross context VM structure.
1488 * @param pVCpu The cross context virtual CPU structure.
1489 * @param pCtx Pointer to the guest CPU context.
1490 */
1491EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1492{
1493 /*
1494 * When forcing raw-mode execution, things are simple.
1495 */
1496 if (pVCpu->em.s.fForceRAW)
1497 return EMSTATE_RAW;
1498
1499 /*
1500 * We stay in the wait for SIPI state unless explicitly told otherwise.
1501 */
1502 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1503 return EMSTATE_WAIT_SIPI;
1504
1505 /*
1506 * Execute everything in IEM?
1507 */
1508 if (pVM->em.s.fIemExecutesAll)
1509 return EMSTATE_IEM;
1510
1511 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1512 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1513 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1514
1515 X86EFLAGS EFlags = pCtx->eflags;
1516 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1517 {
1518 if (EMIsHwVirtExecutionEnabled(pVM))
1519 {
1520 if (VM_IS_HM_ENABLED(pVM))
1521 {
1522 if (HMR3CanExecuteGuest(pVM, pCtx))
1523 return EMSTATE_HM;
1524 }
1525 else if (NEMR3CanExecuteGuest(pVM, pVCpu, pCtx))
1526 return EMSTATE_NEM;
1527
1528 /*
1529 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1530 * turns off monitoring features essential for raw mode!
1531 */
1532 return EMSTATE_IEM_THEN_REM;
1533 }
1534 }
1535
1536 /*
1537 * Standard raw-mode:
1538 *
1539 * Here we only support 16 & 32-bit protected mode ring-3 code that has no I/O privileges,
1540 * or 32-bit protected mode ring-0 code.
1541 *
1542 * The tests are ordered by the likelihood of being true during normal execution.
1543 */
1544 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1545 {
1546 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1547 return EMSTATE_REM;
1548 }
1549
1550# ifndef VBOX_RAW_V86
1551 if (EFlags.u32 & X86_EFL_VM) {
1552 Log2(("raw mode refused: VM_MASK\n"));
1553 return EMSTATE_REM;
1554 }
1555# endif
1556
1557 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1558 uint32_t u32CR0 = pCtx->cr0;
1559 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1560 {
1561 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1562 return EMSTATE_REM;
1563 }
1564
1565 if (pCtx->cr4 & X86_CR4_PAE)
1566 {
1567 uint32_t u32Dummy, u32Features;
1568
1569 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1570 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1571 return EMSTATE_REM;
1572 }
1573
1574 unsigned uSS = pCtx->ss.Sel;
1575 if ( pCtx->eflags.Bits.u1VM
1576 || (uSS & X86_SEL_RPL) == 3)
1577 {
1578 if (!EMIsRawRing3Enabled(pVM))
1579 return EMSTATE_REM;
1580
1581 if (!(EFlags.u32 & X86_EFL_IF))
1582 {
1583 Log2(("raw mode refused: IF (RawR3)\n"));
1584 return EMSTATE_REM;
1585 }
1586
1587 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1588 {
1589 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1590 return EMSTATE_REM;
1591 }
1592 }
1593 else
1594 {
1595 if (!EMIsRawRing0Enabled(pVM))
1596 return EMSTATE_REM;
1597
1598 if (EMIsRawRing1Enabled(pVM))
1599 {
1600 /* Only ring 0 and 1 supervisor code. */
1601 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1602 {
1603 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1604 return EMSTATE_REM;
1605 }
1606 }
1607 /* Only ring 0 supervisor code. */
1608 else if ((uSS & X86_SEL_RPL) != 0)
1609 {
1610 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1611 return EMSTATE_REM;
1612 }
1613
1614 // Let's start with pure 32 bits ring 0 code first
1615 /** @todo What's pure 32-bit mode? flat? */
1616 if ( !(pCtx->ss.Attr.n.u1DefBig)
1617 || !(pCtx->cs.Attr.n.u1DefBig))
1618 {
1619 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1620 return EMSTATE_REM;
1621 }
1622
1623 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1624 if (!(u32CR0 & X86_CR0_WP))
1625 {
1626 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1627 return EMSTATE_REM;
1628 }
1629
1630# ifdef VBOX_WITH_RAW_MODE
1631 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1632 {
1633 Log2(("raw r0 mode forced: patch code\n"));
1634# ifdef VBOX_WITH_SAFE_STR
1635 Assert(pCtx->tr.Sel);
1636# endif
1637 return EMSTATE_RAW;
1638 }
1639# endif /* VBOX_WITH_RAW_MODE */
1640
1641# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1642 if (!(EFlags.u32 & X86_EFL_IF))
1643 {
1644 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1645 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1646 return EMSTATE_REM;
1647 }
1648# endif
1649
1650# ifndef VBOX_WITH_RAW_RING1
1651 /** @todo still necessary??? */
1652 if (EFlags.Bits.u2IOPL != 0)
1653 {
1654 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1655 return EMSTATE_REM;
1656 }
1657# endif
1658 }
1659
1660 /*
1661 * Stale hidden selectors means raw-mode is unsafe (being very careful).
1662 */
1663 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1664 {
1665 Log2(("raw mode refused: stale CS\n"));
1666 return EMSTATE_REM;
1667 }
1668 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1669 {
1670 Log2(("raw mode refused: stale SS\n"));
1671 return EMSTATE_REM;
1672 }
1673 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1674 {
1675 Log2(("raw mode refused: stale DS\n"));
1676 return EMSTATE_REM;
1677 }
1678 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1679 {
1680 Log2(("raw mode refused: stale ES\n"));
1681 return EMSTATE_REM;
1682 }
1683 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1684 {
1685 Log2(("raw mode refused: stale FS\n"));
1686 return EMSTATE_REM;
1687 }
1688 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1689 {
1690 Log2(("raw mode refused: stale GS\n"));
1691 return EMSTATE_REM;
1692 }
1693
1694# ifdef VBOX_WITH_SAFE_STR
1695 if (pCtx->tr.Sel == 0)
1696 {
1697 Log(("Raw mode refused -> TR=0\n"));
1698 return EMSTATE_REM;
1699 }
1700# endif
1701
1702 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1703 return EMSTATE_RAW;
1704}
1705
1706
1707/**
1708 * Executes all high priority post execution force actions.
1709 *
1710 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1711 * fatal error status code.
1712 *
1713 * @param pVM The cross context VM structure.
1714 * @param pVCpu The cross context virtual CPU structure.
1715 * @param rc The current strict VBox status code rc.
1716 */
1717VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1718{
1719 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1720
1721 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1722 PDMCritSectBothFF(pVCpu);
1723
1724 /* Update CR3 (Nested Paging case for HM). */
1725 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1726 {
1727 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1728 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1729 if (RT_FAILURE(rc2))
1730 return rc2;
1731 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1732 }
1733
1734 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1735 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1736 {
1737 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1738 if (CPUMIsGuestInPAEMode(pVCpu))
1739 {
1740 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1741 AssertPtr(pPdpes);
1742
1743 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1744 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1745 }
1746 else
1747 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1748 }
1749
1750 /* IEM has pending work (typically memory write after INS instruction). */
1751 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
1752 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1753
1754 /* IOM has pending work (committing an I/O or MMIO write). */
1755 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
1756 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1757
1758#ifdef VBOX_WITH_RAW_MODE
1759 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1760 CSAMR3DoPendingAction(pVM, pVCpu);
1761#endif
1762
1763 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1764 {
1765 if ( rc > VINF_EM_NO_MEMORY
1766 && rc <= VINF_EM_LAST)
1767 rc = VINF_EM_NO_MEMORY;
1768 }
1769
1770 return rc;
1771}
1772
1773#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1774/**
1775 * Helper for emR3ForcedActions() for injecting interrupts into the
1776 * nested-guest.
1777 *
1778 * @returns VBox status code.
1779 * @param pVCpu The cross context virtual CPU structure.
1780 * @param pCtx Pointer to the nested-guest CPU context.
1781 * @param pfResched Where to store whether a reschedule is required.
1782 * @param pfInject Where to store whether an interrupt was injected (and if
1783 * a wake up is pending).
1784 */
1785static int emR3NstGstInjectIntr(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfResched, bool *pfInject)
1786{
1787 *pfResched = false;
1788 *pfInject = false;
1789 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1790 {
1791 PVM pVM = pVCpu->CTX_SUFF(pVM);
1792 Assert(pCtx->hwvirt.fGif);
1793 bool fVirtualGif = CPUMGetSvmNstGstVGif(pCtx);
1794#ifdef VBOX_WITH_RAW_MODE
1795 fVirtualGif &= !PATMIsPatchGCAddr(pVM, pCtx->eip);
1796#endif
1797 if (fVirtualGif)
1798 {
1799 if (CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
1800 {
1801 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1802 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1803 {
1804 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
1805 {
1806 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1807 if (RT_SUCCESS(rcStrict))
1808 {
1809 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1810 * doesn't intercept HLT but intercepts INTR? */
1811 *pfResched = true;
1812 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1813 if (rcStrict == VINF_SVM_VMEXIT)
1814 return VINF_SUCCESS;
1815 return VBOXSTRICTRC_VAL(rcStrict);
1816 }
1817
1818 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1819 return VINF_EM_TRIPLE_FAULT;
1820 }
1821
1822 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1823 /** @todo this really isn't nice, should properly handle this */
1824 int rc = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1825 Assert(rc != VINF_PGM_CHANGE_MODE);
1826 if (rc == VINF_SVM_VMEXIT)
1827 rc = VINF_SUCCESS;
1828 if (pVM->em.s.fIemExecutesAll && ( rc == VINF_EM_RESCHEDULE_REM
1829 || rc == VINF_EM_RESCHEDULE_HM
1830 || rc == VINF_EM_RESCHEDULE_RAW))
1831 {
1832 rc = VINF_EM_RESCHEDULE;
1833 }
1834
1835 *pfResched = true;
1836 *pfInject = true;
1837 return rc;
1838 }
1839 }
1840
1841 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1842 && CPUMCanSvmNstGstTakeVirtIntr(pVCpu, pCtx))
1843 {
1844 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
1845 {
1846 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1847 if (RT_SUCCESS(rcStrict))
1848 {
1849 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1850 * doesn't intercept HLT but intercepts VINTR? */
1851 *pfResched = true;
1852 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1853 if (rcStrict == VINF_SVM_VMEXIT)
1854 return VINF_SUCCESS;
1855 return VBOXSTRICTRC_VAL(rcStrict);
1856 }
1857
1858 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1859 return VINF_EM_TRIPLE_FAULT;
1860 }
1861
1862 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1863 uint8_t const uNstGstVector = CPUMGetSvmNstGstInterrupt(pCtx);
1864 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR vector %#x\n", uNstGstVector));
1865 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1866 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1867
1868 *pfResched = true;
1869 *pfInject = true;
1870 return VINF_EM_RESCHEDULE;
1871 }
1872 }
1873 return VINF_SUCCESS;
1874 }
1875
1876 if (CPUMIsGuestInVmxNestedHwVirtMode(pCtx))
1877 { /** @todo Nested VMX. */ }
1878
1879 /* Shouldn't really get here. */
1880 AssertMsgFailed(("Unrecognized nested hwvirt. arch!\n"));
1881 return VERR_EM_INTERNAL_ERROR;
1882}
1883#endif
1884
1885/**
1886 * Executes all pending forced actions.
1887 *
1888 * Forced actions can cause execution delays and execution
1889 * rescheduling. The former we deal with using action priority, so
1890 * that for instance pending timers aren't scheduled and run until
1891 * right before execution. The rescheduling we deal with using
1892 * return codes. The same goes for VM termination, only in that case
1893 * we exit everything.
1894 *
1895 * @returns VBox status code of equal or greater importance/severity than rc.
1896 * The most important ones are: VINF_EM_RESCHEDULE,
1897 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1898 *
1899 * @param pVM The cross context VM structure.
1900 * @param pVCpu The cross context virtual CPU structure.
1901 * @param rc The current rc.
1902 *
1903 */
1904int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1905{
1906 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1907#ifdef VBOX_STRICT
1908 int rcIrq = VINF_SUCCESS;
1909#endif
1910 int rc2;
1911#define UPDATE_RC() \
1912 do { \
1913 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1914 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1915 break; \
1916 if (!rc || rc2 < rc) \
1917 rc = rc2; \
1918 } while (0)
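/*
 * A minimal sketch of how UPDATE_RC() merges status codes (assuming the
 * usual VINF_EM_* ordering, where numerically smaller codes such as
 * VINF_EM_SUSPEND outrank larger ones such as VINF_EM_RESCHEDULE):
 *
 *     int rc = VINF_SUCCESS, rc2;
 *     rc2 = VINF_EM_RESCHEDULE; UPDATE_RC();  // rc becomes VINF_EM_RESCHEDULE
 *     rc2 = VINF_EM_SUSPEND;    UPDATE_RC();  // rc becomes VINF_EM_SUSPEND
 *     rc2 = VINF_EM_RESCHEDULE; UPDATE_RC();  // rc stays VINF_EM_SUSPEND
 *
 * Once rc drops below VINF_SUCCESS (an error), the macro leaves it alone.
 */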
1919 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1920
1921 /*
1922 * Post execution chunk first.
1923 */
1924 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1925 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1926 {
1927 /*
1928 * EMT Rendezvous (must be serviced before termination).
1929 */
1930 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1931 {
1932 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1933 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1934 UPDATE_RC();
1935 /** @todo HACK ALERT! The following test is to make sure EM+TM
1936 * thinks the VM is stopped/reset before the next VM state change
1937 * is made. We need a better solution for this, or at least make it
1938 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1939 * VINF_EM_SUSPEND). */
1940 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1941 {
1942 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1943 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1944 return rc;
1945 }
1946 }
1947
1948 /*
1949 * State change request (cleared by vmR3SetStateLocked).
1950 */
1951 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1952 {
1953 VMSTATE enmState = VMR3GetState(pVM);
1954 switch (enmState)
1955 {
1956 case VMSTATE_FATAL_ERROR:
1957 case VMSTATE_FATAL_ERROR_LS:
1958 case VMSTATE_GURU_MEDITATION:
1959 case VMSTATE_GURU_MEDITATION_LS:
1960 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1961 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1962 return VINF_EM_SUSPEND;
1963
1964 case VMSTATE_DESTROYING:
1965 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1966 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1967 return VINF_EM_TERMINATE;
1968
1969 default:
1970 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1971 }
1972 }
1973
1974 /*
1975 * Debugger Facility polling.
1976 */
1977 if ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
1978 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
1979 {
1980 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1981 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1982 UPDATE_RC();
1983 }
1984
1985 /*
1986 * Postponed reset request.
1987 */
1988 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1989 {
1990 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1991 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1992 UPDATE_RC();
1993 }
1994
1995#ifdef VBOX_WITH_RAW_MODE
1996 /*
1997 * CSAM page scanning.
1998 */
1999 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2000 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
2001 {
2002 /** @todo check for 16 or 32 bits code! (D bit in the code selector) */
2003 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
2004 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2005 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2006 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
2007 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
2008 }
2009#endif
2010
2011 /*
2012 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
2013 */
2014 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2015 {
2016 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2017 UPDATE_RC();
2018 if (rc == VINF_EM_NO_MEMORY)
2019 return rc;
2020 }
2021
2022 /* check that we got them all */
2023 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2024 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
2025 }
2026
2027 /*
2028 * Normal priority then.
2029 * (Executed in no particular order.)
2030 */
2031 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
2032 {
2033 /*
2034 * PDM Queues are pending.
2035 */
2036 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
2037 PDMR3QueueFlushAll(pVM);
2038
2039 /*
2040 * PDM DMA transfers are pending.
2041 */
2042 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
2043 PDMR3DmaRun(pVM);
2044
2045 /*
2046 * EMT Rendezvous (make sure they are handled before the requests).
2047 */
2048 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2049 {
2050 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2051 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2052 UPDATE_RC();
2053 /** @todo HACK ALERT! The following test is to make sure EM+TM
2054 * thinks the VM is stopped/reset before the next VM state change
2055 * is made. We need a better solution for this, or at least make it
2056 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2057 * VINF_EM_SUSPEND). */
2058 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2059 {
2060 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2061 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2062 return rc;
2063 }
2064 }
2065
2066 /*
2067 * Requests from other threads.
2068 */
2069 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
2070 {
2071 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2072 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
2073 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
2074 {
2075 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2076 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2077 return rc2;
2078 }
2079 UPDATE_RC();
2080 /** @todo HACK ALERT! The following test is to make sure EM+TM
2081 * thinks the VM is stopped/reset before the next VM state change
2082 * is made. We need a better solution for this, or at least make it
2083 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2084 * VINF_EM_SUSPEND). */
2085 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2086 {
2087 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2088 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2089 return rc;
2090 }
2091 }
2092
2093#ifdef VBOX_WITH_REM
2094 /* Replay the handler notification changes. */
2095 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
2096 {
2097 /* Try not to cause deadlocks. */
2098 if ( pVM->cCpus == 1
2099 || ( !PGMIsLockOwner(pVM)
2100 && !IOMIsLockWriteOwner(pVM))
2101 )
2102 {
2103 EMRemLock(pVM);
2104 REMR3ReplayHandlerNotifications(pVM);
2105 EMRemUnlock(pVM);
2106 }
2107 }
2108#endif
2109
2110 /* check that we got them all */
2111 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
2112 }
2113
2114 /*
2115 * Normal priority then. (per-VCPU)
2116 * (Executed in no particular order.)
2117 */
2118 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2119 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
2120 {
2121 /*
2122 * Requests from other threads.
2123 */
2124 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
2125 {
2126 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2127 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
2128 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
2129 {
2130 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2131 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2132 return rc2;
2133 }
2134 UPDATE_RC();
2135 /** @todo HACK ALERT! The following test is to make sure EM+TM
2136 * thinks the VM is stopped/reset before the next VM state change
2137 * is made. We need a better solution for this, or at least make it
2138 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2139 * VINF_EM_SUSPEND). */
2140 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2141 {
2142 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2143 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2144 return rc;
2145 }
2146 }
2147
2148 /* check that we got them all */
2149 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
2150 }
2151
2152 /*
2153 * High priority pre execution chunk last.
2154 * (Executed in ascending priority order.)
2155 */
2156 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
2157 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
2158 {
2159 /*
2160 * Timers before interrupts.
2161 */
2162 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
2163 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2164 TMR3TimerQueuesDo(pVM);
2165
2166 /*
2167 * Pick up asynchronously posted interrupts into the APIC.
2168 */
2169 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2170 APICUpdatePendingInterrupts(pVCpu);
2171
2172 /*
2173 * The instruction following an emulated STI should *always* be executed!
2174 *
2175 * Note! We intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if
2176 * the eip is the same as the inhibited instr address. Before we
2177 * are able to execute this instruction in raw mode (iret to
2178 * guest code) an external interrupt might force a world switch
2179 * again. Possibly allowing a guest interrupt to be dispatched
2180 * in the process. This could break the guest. Sounds very
2181 * unlikely, but such timing sensitive problem are not as rare as
2182 * you might think.
2183 */
2184 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2185 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2186 {
2187 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
2188 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
2189 {
2190 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
2191 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2192 }
2193 else
2194 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
2195 }
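/*
 * Example of what the check above protects (descriptive only): a guest
 * doing STI followed immediately by RET or HLT relies on the x86 STI
 * interrupt shadow, which lets the instruction after STI complete before
 * any interrupt is delivered. Hence the FF must survive world switches
 * until RIP has moved past the inhibited instruction.
 */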
2196
2197 /*
2198 * Interrupts.
2199 */
2200 bool fWakeupPending = false;
2201 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2202 && (!rc || rc >= VINF_EM_RESCHEDULE_HM))
2203 {
2204 if ( !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2205 && !TRPMHasTrap(pVCpu)) /* an interrupt could already be scheduled for dispatching in the recompiler. */
2206 {
2207 Assert(!HMR3IsEventPending(pVCpu));
2208 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2209#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2210 if (CPUMIsGuestInNestedHwVirtMode(pCtx))
2211 {
2212 bool fResched, fInject;
2213 rc2 = emR3NstGstInjectIntr(pVCpu, pCtx, &fResched, &fInject);
2214 if (fInject)
2215 {
2216 fWakeupPending = true;
2217# ifdef VBOX_STRICT
2218 rcIrq = rc2;
2219# endif
2220 }
2221 if (fResched)
2222 UPDATE_RC();
2223 }
2224 else
2225#endif
2226 {
2227 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
2228 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2229#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2230 && pCtx->hwvirt.fGif
2231#endif
2232#ifdef VBOX_WITH_RAW_MODE
2233 && !PATMIsPatchGCAddr(pVM, pCtx->eip)
2234#endif
2235 && pCtx->eflags.Bits.u1IF)
2236 {
2237 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2238 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
2239 /** @todo this really isn't nice, should properly handle this */
2240 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2241 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
2242 Log(("EM: TRPMR3InjectEvent -> %d\n", rc2));
2243 if (pVM->em.s.fIemExecutesAll && ( rc2 == VINF_EM_RESCHEDULE_REM
2244 || rc2 == VINF_EM_RESCHEDULE_HM
2245 || rc2 == VINF_EM_RESCHEDULE_RAW))
2246 {
2247 rc2 = VINF_EM_RESCHEDULE;
2248 }
2249#ifdef VBOX_STRICT
2250 rcIrq = rc2;
2251#endif
2252 UPDATE_RC();
2253 /* Reschedule required: We must not miss the wakeup below! */
2254 fWakeupPending = true;
2255 }
2256 }
2257 }
2258 }
2259
2260 /*
2261 * Allocate handy pages.
2262 */
2263 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2264 {
2265 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2266 UPDATE_RC();
2267 }
2268
2269 /*
2270 * Debugger Facility request.
2271 */
2272 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2273 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2274 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
2275 {
2276 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2277 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2278 UPDATE_RC();
2279 }
2280
2281 /*
2282 * EMT Rendezvous (must be serviced before termination).
2283 */
2284 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2285 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2286 {
2287 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2288 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2289 UPDATE_RC();
2290 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2291 * stopped/reset before the next VM state change is made. We need a better
2292 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2293 * && rc <= VINF_EM_SUSPEND). */
2294 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2295 {
2296 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2297 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2298 return rc;
2299 }
2300 }
2301
2302 /*
2303 * State change request (cleared by vmR3SetStateLocked).
2304 */
2305 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2306 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2307 {
2308 VMSTATE enmState = VMR3GetState(pVM);
2309 switch (enmState)
2310 {
2311 case VMSTATE_FATAL_ERROR:
2312 case VMSTATE_FATAL_ERROR_LS:
2313 case VMSTATE_GURU_MEDITATION:
2314 case VMSTATE_GURU_MEDITATION_LS:
2315 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2316 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2317 return VINF_EM_SUSPEND;
2318
2319 case VMSTATE_DESTROYING:
2320 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2321 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2322 return VINF_EM_TERMINATE;
2323
2324 default:
2325 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2326 }
2327 }
2328
2329 /*
2330 * Out of memory? Since most of our fellow high priority actions may cause us
2331 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2332 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2333 * than us since we can terminate without allocating more memory.
2334 */
2335 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2336 {
2337 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2338 UPDATE_RC();
2339 if (rc == VINF_EM_NO_MEMORY)
2340 return rc;
2341 }
2342
2343 /*
2344 * If the virtual sync clock is still stopped, make TM restart it.
2345 */
2346 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2347 TMR3VirtualSyncFF(pVM, pVCpu);
2348
2349#ifdef DEBUG
2350 /*
2351 * Debug, pause the VM.
2352 */
2353 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2354 {
2355 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2356 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2357 return VINF_EM_SUSPEND;
2358 }
2359#endif
2360
2361 /* check that we got them all */
2362 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2363 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2364 }
2365
2366#undef UPDATE_RC
2367 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2368 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2369 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2370 return rc;
2371}
2372
2373
2374/**
2375 * Check whether the preset execution time cap still allows guest execution.
2376 *
2377 * @returns true if execution is allowed, false otherwise.
2378 * @param pVM The cross context VM structure.
2379 * @param pVCpu The cross context virtual CPU structure.
2380 */
2381bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2382{
2383 uint64_t u64UserTime, u64KernelTime;
2384
2385 if ( pVM->uCpuExecutionCap != 100
2386 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2387 {
2388 uint64_t u64TimeNow = RTTimeMilliTS();
2389 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2390 {
2391 /* New time slice. */
2392 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2393 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2394 pVCpu->em.s.u64TimeSliceExec = 0;
2395 }
2396 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2397
2398 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2399 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2400 return false;
2401 }
2402 return true;
2403}
2404
2405
2406/**
2407 * Execute VM.
2408 *
2409 * This function is the main loop of the VM. The emulation thread
2410 * calls this function when the VM has been successfully constructed
2411 * and we're ready to execute the VM.
2412 *
2413 * Returning from this function means that the VM is turned off or
2414 * suspended (state already saved) and deconstruction is next in line.
2415 *
2416 * All interaction from other threads is done using forced actions
2417 * and signaling of the wait object.
2418 *
2419 * @returns VBox status code, informational status codes may indicate failure.
2420 * @param pVM The cross context VM structure.
2421 * @param pVCpu The cross context virtual CPU structure.
2422 */
2423VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2424{
2425 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2426 pVM,
2427 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2428 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2429 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2430 pVCpu->em.s.fForceRAW));
2431 VM_ASSERT_EMT(pVM);
2432 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2433 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2434 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2435 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2436
2437 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2438 if (rc == 0)
2439 {
2440 /*
2441 * Start the virtual time.
2442 */
2443 TMR3NotifyResume(pVM, pVCpu);
2444
2445 /*
2446 * The Outer Main Loop.
2447 */
2448 bool fFFDone = false;
2449
2450 /* Reschedule right away to start in the right state. */
2451 rc = VINF_SUCCESS;
2452
2453 /* If resuming after a pause or a state load, restore the previous
2454 state or else we'll start executing code. Else, just reschedule. */
2455 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2456 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2457 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2458 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2459 else
2460 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2461 pVCpu->em.s.cIemThenRemInstructions = 0;
2462 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2463
2464 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2465 for (;;)
2466 {
2467 /*
2468 * Before we can schedule anything (we're here because
2469 * scheduling is required) we must service any pending
2470 * forced actions to avoid any pending action causing
2471 * immediate rescheduling upon entering an inner loop
2472 *
2473 * Do forced actions.
2474 */
2475 if ( !fFFDone
2476 && RT_SUCCESS(rc)
2477 && rc != VINF_EM_TERMINATE
2478 && rc != VINF_EM_OFF
2479 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2480 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2481 {
2482 rc = emR3ForcedActions(pVM, pVCpu, rc);
2483 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2484 if ( ( rc == VINF_EM_RESCHEDULE_REM
2485 || rc == VINF_EM_RESCHEDULE_HM)
2486 && pVCpu->em.s.fForceRAW)
2487 rc = VINF_EM_RESCHEDULE_RAW;
2488 }
2489 else if (fFFDone)
2490 fFFDone = false;
2491
2492 /*
2493 * Now what to do?
2494 */
2495 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2496 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2497 switch (rc)
2498 {
2499 /*
2500 * Keep doing what we're currently doing.
2501 */
2502 case VINF_SUCCESS:
2503 break;
2504
2505 /*
2506 * Reschedule - to raw-mode execution.
2507 */
2508/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2509 case VINF_EM_RESCHEDULE_RAW:
2510 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2511 if (VM_IS_RAW_MODE_ENABLED(pVM))
2512 {
2513 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2514 pVCpu->em.s.enmState = EMSTATE_RAW;
2515 }
2516 else
2517 {
2518 AssertLogRelFailed();
2519 pVCpu->em.s.enmState = EMSTATE_NONE;
2520 }
2521 break;
2522
2523 /*
2524 * Reschedule - to HM or NEM.
2525 */
2526 case VINF_EM_RESCHEDULE_HM:
2527 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2528 Assert(!pVCpu->em.s.fForceRAW);
2529 if (VM_IS_HM_ENABLED(pVM))
2530 {
2531 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2532 pVCpu->em.s.enmState = EMSTATE_HM;
2533 }
2534 else if (VM_IS_NEM_ENABLED(pVM))
2535 {
2536 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2537 pVCpu->em.s.enmState = EMSTATE_NEM;
2538 }
2539 else
2540 {
2541 AssertLogRelFailed();
2542 pVCpu->em.s.enmState = EMSTATE_NONE;
2543 }
2544 break;
2545
2546 /*
2547 * Reschedule - to recompiled execution.
2548 */
2549 case VINF_EM_RESCHEDULE_REM:
2550 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2551 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2552 {
2553 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2554 enmOldState, EMSTATE_IEM_THEN_REM));
2555 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2556 {
2557 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2558 pVCpu->em.s.cIemThenRemInstructions = 0;
2559 }
2560 }
2561 else
2562 {
2563 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2564 pVCpu->em.s.enmState = EMSTATE_REM;
2565 }
2566 break;
2567
2568 /*
2569 * Resume.
2570 */
2571 case VINF_EM_RESUME:
2572 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2573 /* Don't reschedule in the halted or wait for SIPI case. */
2574 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2575 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2576 {
2577 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2578 break;
2579 }
2580 /* fall through and get scheduled. */
2581 RT_FALL_THRU();
2582
2583 /*
2584 * Reschedule.
2585 */
2586 case VINF_EM_RESCHEDULE:
2587 {
2588 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2589 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2590 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2591 pVCpu->em.s.cIemThenRemInstructions = 0;
2592 pVCpu->em.s.enmState = enmState;
2593 break;
2594 }
2595
2596 /*
2597 * Halted.
2598 */
2599 case VINF_EM_HALT:
2600 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2601 pVCpu->em.s.enmState = EMSTATE_HALTED;
2602 break;
2603
2604 /*
2605 * Switch to the wait for SIPI state (application processor only)
2606 */
2607 case VINF_EM_WAIT_SIPI:
2608 Assert(pVCpu->idCpu != 0);
2609 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2610 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2611 break;
2612
2613
2614 /*
2615 * Suspend.
2616 */
2617 case VINF_EM_SUSPEND:
2618 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2619 Assert(enmOldState != EMSTATE_SUSPENDED);
2620 pVCpu->em.s.enmPrevState = enmOldState;
2621 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2622 break;
2623
2624 /*
2625 * Reset.
2626 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2627 */
2628 case VINF_EM_RESET:
2629 {
2630 if (pVCpu->idCpu == 0)
2631 {
2632 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2633 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2634 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2635 pVCpu->em.s.cIemThenRemInstructions = 0;
2636 pVCpu->em.s.enmState = enmState;
2637 }
2638 else
2639 {
2640 /* All other VCPUs go into the wait for SIPI state. */
2641 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2642 }
2643 break;
2644 }
2645
2646 /*
2647 * Power Off.
2648 */
2649 case VINF_EM_OFF:
2650 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2651 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2652 TMR3NotifySuspend(pVM, pVCpu);
2653 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2654 return rc;
2655
2656 /*
2657 * Terminate the VM.
2658 */
2659 case VINF_EM_TERMINATE:
2660 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2661 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2662 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2663 TMR3NotifySuspend(pVM, pVCpu);
2664 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2665 return rc;
2666
2667
2668 /*
2669 * Out of memory, suspend the VM and stuff.
2670 */
2671 case VINF_EM_NO_MEMORY:
2672 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2673 Assert(enmOldState != EMSTATE_SUSPENDED);
2674 pVCpu->em.s.enmPrevState = enmOldState;
2675 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2676 TMR3NotifySuspend(pVM, pVCpu);
2677 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2678
2679 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2680 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2681 if (rc != VINF_EM_SUSPEND)
2682 {
2683 if (RT_SUCCESS_NP(rc))
2684 {
2685 AssertLogRelMsgFailed(("%Rrc\n", rc));
2686 rc = VERR_EM_INTERNAL_ERROR;
2687 }
2688 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2689 }
2690 return rc;
2691
2692 /*
2693 * Guest debug events.
2694 */
2695 case VINF_EM_DBG_STEPPED:
2696 case VINF_EM_DBG_STOP:
2697 case VINF_EM_DBG_EVENT:
2698 case VINF_EM_DBG_BREAKPOINT:
2699 case VINF_EM_DBG_STEP:
2700 if (enmOldState == EMSTATE_RAW)
2701 {
2702 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2703 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2704 }
2705 else if (enmOldState == EMSTATE_HM)
2706 {
2707 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2708 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2709 }
2710 else if (enmOldState == EMSTATE_NEM)
2711 {
2712 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2713 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2714 }
2715 else if (enmOldState == EMSTATE_REM)
2716 {
2717 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2718 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2719 }
2720 else
2721 {
2722 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2723 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2724 }
2725 break;
2726
2727 /*
2728 * Hypervisor debug events.
2729 */
2730 case VINF_EM_DBG_HYPER_STEPPED:
2731 case VINF_EM_DBG_HYPER_BREAKPOINT:
2732 case VINF_EM_DBG_HYPER_ASSERTION:
2733 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2734 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2735 break;
2736
2737 /*
2738 * Triple fault.
2739 */
2740 case VINF_EM_TRIPLE_FAULT:
2741 if (!pVM->em.s.fGuruOnTripleFault)
2742 {
2743 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2744 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2745 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2746 continue;
2747 }
2748 /* Else fall through and trigger a guru. */
2749 RT_FALL_THRU();
2750
2751 case VERR_VMM_RING0_ASSERTION:
2752 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2753 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2754 break;
2755
2756 /*
2757 * Any error code showing up here other than the ones we
2758 * know and process above is considered to be FATAL.
2759 *
2760 * Unknown warnings and informational status codes are also
2761 * included in this.
2762 */
2763 default:
2764 if (RT_SUCCESS_NP(rc))
2765 {
2766 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2767 rc = VERR_EM_INTERNAL_ERROR;
2768 }
2769 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2770 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2771 break;
2772 }
2773
2774 /*
2775 * Act on state transition.
2776 */
2777 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2778 if (enmOldState != enmNewState)
2779 {
2780 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2781
2782 /* Clear MWait flags and the unhalt FF. */
2783 if ( enmOldState == EMSTATE_HALTED
2784 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2785 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2786 && ( enmNewState == EMSTATE_RAW
2787 || enmNewState == EMSTATE_HM
2788 || enmNewState == EMSTATE_NEM
2789 || enmNewState == EMSTATE_REM
2790 || enmNewState == EMSTATE_IEM_THEN_REM
2791 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2792 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2793 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2794 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2795 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2796 {
2797 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2798 {
2799 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2800 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2801 }
2802 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2803 {
2804 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2805 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2806 }
2807 }
2808 }
2809 else
2810 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2811
2812 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2813 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2814
2815 /*
2816 * Act on the new state.
2817 */
2818 switch (enmNewState)
2819 {
2820 /*
2821 * Execute raw.
2822 */
2823 case EMSTATE_RAW:
2824#ifdef VBOX_WITH_RAW_MODE
2825 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2826#else
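                     /* Raw-mode support is not compiled in; scheduling this
                        state is an internal error. */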
2827 AssertLogRelMsgFailed(("%Rrc\n", rc));
2828 rc = VERR_EM_INTERNAL_ERROR;
2829#endif
2830 break;
2831
2832 /*
2833 * Execute hardware accelerated raw.
2834 */
2835 case EMSTATE_HM:
2836 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2837 break;
2838
2839 /*
2840              * Execute using the native execution manager (NEM).
2841 */
2842 case EMSTATE_NEM:
2843 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2844 break;
2845
2846 /*
2847 * Execute recompiled.
2848 */
2849 case EMSTATE_REM:
2850 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2851 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2852 break;
2853
2854 /*
2855 * Execute in the interpreter.
2856 */
2857 case EMSTATE_IEM:
2858 {
2859#if 0 /* For testing purposes. */
2860 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2861 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2862 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2863 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2864 rc = VINF_SUCCESS;
2865 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2866#endif
2867 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
2868 if (pVM->em.s.fIemExecutesAll)
2869 {
2870 Assert(rc != VINF_EM_RESCHEDULE_REM);
2871 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2872 Assert(rc != VINF_EM_RESCHEDULE_HM);
2873 }
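                     /* Signal the outer loop that pending force flags still
                        need processing. */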
2874 fFFDone = false;
2875 break;
2876 }
2877
2878 /*
2879              * Execute in IEM, hoping we can quickly switch back to HM
2880 * or RAW execution. If our hopes fail, we go to REM.
2881 */
2882 case EMSTATE_IEM_THEN_REM:
2883 {
2884 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2885 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2886 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2887 break;
2888 }
2889
2890 /*
2891 * Application processor execution halted until SIPI.
2892 */
2893 case EMSTATE_WAIT_SIPI:
2894 /* no break */
2895 /*
2896 * hlt - execution halted until interrupt.
2897 */
2898 case EMSTATE_HALTED:
2899 {
2900 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2901                 /* If HM (or someone else) stores a pending interrupt in
2902                    TRPM, it must be dispatched ASAP without any halting.
2903                    Anything pending in TRPM has been accepted and the CPU
2904                    should already be in the right state to receive it. */
2905 if (TRPMHasTrap(pVCpu))
2906 rc = VINF_EM_RESCHEDULE;
2907                 /* MWAIT has a special extension where the CPU is woken up
2908                    when an interrupt is pending even when IF=0. */
2909 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2910 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2911 {
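                         /* Wait, but wake up on a pending interrupt even if
                            EFLAGS.IF is clear, as the MWAIT break extension
                            requests. */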
2912 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2913 if (rc == VINF_SUCCESS)
2914 {
2915 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2916 APICUpdatePendingInterrupts(pVCpu);
2917
2918 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2919 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2920 {
2921 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2922 rc = VINF_EM_RESCHEDULE;
2923 }
2924 }
2925 }
2926 else
2927 {
2928 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2929                 /* We're only interested in NMIs/SMIs here, which have their own FFs,
2930                    so there is no need to check VMCPU_FF_UPDATE_APIC. */
2931 if ( rc == VINF_SUCCESS
2932 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2933 {
2934 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2935 rc = VINF_EM_RESCHEDULE;
2936 }
2937 }
2938
2939 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2940 break;
2941 }
2942
2943 /*
2944 * Suspended - return to VM.cpp.
2945 */
2946 case EMSTATE_SUSPENDED:
2947 TMR3NotifySuspend(pVM, pVCpu);
2948 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2949 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2950 return VINF_EM_SUSPEND;
2951
2952 /*
2953 * Debugging in the guest.
2954 */
2955 case EMSTATE_DEBUG_GUEST_RAW:
2956 case EMSTATE_DEBUG_GUEST_HM:
2957 case EMSTATE_DEBUG_GUEST_NEM:
2958 case EMSTATE_DEBUG_GUEST_IEM:
2959 case EMSTATE_DEBUG_GUEST_REM:
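                 /* Pause virtual time while the debugger owns the VCpu and
                    resume it when the debug loop returns. */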
2960 TMR3NotifySuspend(pVM, pVCpu);
2961 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2962 TMR3NotifyResume(pVM, pVCpu);
2963 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2964 break;
2965
2966 /*
2967 * Debugging in the hypervisor.
2968 */
2969 case EMSTATE_DEBUG_HYPER:
2970 {
2971 TMR3NotifySuspend(pVM, pVCpu);
2972 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2973
2974 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
2975 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2976 if (rc != VINF_SUCCESS)
2977 {
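                         /* The debugger did not simply resume execution: power
                            off / terminate ends the VM, anything else is fatal. */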
2978 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
2979 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2980 else
2981 {
2982 /* switch to guru meditation mode */
2983 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2984 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
2985 VMMR3FatalDump(pVM, pVCpu, rc);
2986 }
2987 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2988 return rc;
2989 }
2990
2991 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2992 TMR3NotifyResume(pVM, pVCpu);
2993 break;
2994 }
2995
2996 /*
2997 * Guru meditation takes place in the debugger.
2998 */
2999 case EMSTATE_GURU_MEDITATION:
3000 {
3001 TMR3NotifySuspend(pVM, pVCpu);
3002 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3003 VMMR3FatalDump(pVM, pVCpu, rc);
3004 emR3Debug(pVM, pVCpu, rc);
3005 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3006 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3007 return rc;
3008 }
3009
3010 /*
3011 * The states we don't expect here.
3012 */
3013 case EMSTATE_NONE:
3014 case EMSTATE_TERMINATING:
3015 default:
3016 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
3017 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3018 TMR3NotifySuspend(pVM, pVCpu);
3019 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3020 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3021 return VERR_EM_INTERNAL_ERROR;
3022 }
3023 } /* The Outer Main Loop */
3024 }
3025 else
3026 {
3027 /*
3028 * Fatal error.
3029 */
3030 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
3031 TMR3NotifySuspend(pVM, pVCpu);
3032 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3033 VMMR3FatalDump(pVM, pVCpu, rc);
3034 emR3Debug(pVM, pVCpu, rc);
3035 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3036 /** @todo change the VM state! */
3037 return rc;
3038 }
3039
3040 /* not reached */
3041}
3042
3043/**
3044 * Notify EM of a suspend state change (used by FTM).
3045 *
3046 * @param pVM The cross context VM structure.
3047 */
3048VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
3049{
3050 PVMCPU pVCpu = VMMGetCpu(pVM);
3051
3052 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
3053 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
3054 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
3055 return VINF_SUCCESS;
3056}
3057
3058/**
3059 * Notify EM of a state change (used by FTM)
3060 * Notify EM of a resume state change (used by FTM).
3061 * @param pVM The cross context VM structure.
3062 */
3063VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
3064{
3065 PVMCPU pVCpu = VMMGetCpu(pVM);
3066 EMSTATE enmCurState = pVCpu->em.s.enmState;
3067
3068 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
3069 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
3070 pVCpu->em.s.enmPrevState = enmCurState;
3071 return VINF_SUCCESS;
3072}