VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp @ 72579

Last change on this file since 72579 was 72579, checked in by vboxsync, 6 years ago

EM: Implemented a very simple history record replacement strategy. bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 140.9 KB
1/* $Id: EM.cpp 72579 2018-06-16 14:32:26Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
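/* A condensed sketch of the scheduling dispatch described above (illustrative
 * only; the real EMR3ExecuteVM() loop further down in this file also deals
 * with forced actions, halting, debugging and state transitions):
 *
 *      for (;;)
 *          switch (pVCpu->em.s.enmState)
 *          {
 *              case EMSTATE_RAW: rc = emR3RawExecute(pVM, pVCpu, &fFFDone); break;
 *              case EMSTATE_HM:  rc = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
 *              case EMSTATE_REM: rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *              default: ...
 *          }
 */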
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/vmm.h>
42#include <VBox/vmm/patm.h>
43#include <VBox/vmm/csam.h>
44#include <VBox/vmm/selm.h>
45#include <VBox/vmm/trpm.h>
46#include <VBox/vmm/iem.h>
47#include <VBox/vmm/nem.h>
48#include <VBox/vmm/iom.h>
49#include <VBox/vmm/dbgf.h>
50#include <VBox/vmm/pgm.h>
51#ifdef VBOX_WITH_REM
52# include <VBox/vmm/rem.h>
53#endif
54#include <VBox/vmm/apic.h>
55#include <VBox/vmm/tm.h>
56#include <VBox/vmm/mm.h>
57#include <VBox/vmm/ssm.h>
58#include <VBox/vmm/pdmapi.h>
59#include <VBox/vmm/pdmcritsect.h>
60#include <VBox/vmm/pdmqueue.h>
61#include <VBox/vmm/hm.h>
62#include <VBox/vmm/patm.h>
63#include "EMInternal.h"
64#include <VBox/vmm/vm.h>
65#include <VBox/vmm/uvm.h>
66#include <VBox/vmm/cpumdis.h>
67#include <VBox/dis.h>
68#include <VBox/disopcode.h>
69#include "VMMTracing.h"
70
71#include <iprt/asm.h>
72#include <iprt/string.h>
73#include <iprt/stream.h>
74#include <iprt/thread.h>
75
76
77/*********************************************************************************************************************************
78* Defined Constants And Macros *
79*********************************************************************************************************************************/
80#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
81#define EM_NOTIFY_HM
82#endif
83
84
85/*********************************************************************************************************************************
86* Internal Functions *
87*********************************************************************************************************************************/
88static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
89static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
90#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
91static const char *emR3GetStateName(EMSTATE enmState);
92#endif
93static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
94#if defined(VBOX_WITH_REM) || defined(DEBUG)
95static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
96#endif
97static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
98
99
100/**
101 * Initializes the EM.
102 *
103 * @returns VBox status code.
104 * @param pVM The cross context VM structure.
105 */
106VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
107{
108 LogFlow(("EMR3Init\n"));
109 /*
110 * Assert alignment and sizes.
111 */
112 AssertCompileMemberAlignment(VM, em.s, 32);
113 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
114 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
115
116 /*
117 * Init the structure.
118 */
119 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
120 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
121 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
122
123 bool fEnabled;
124 int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
125 AssertLogRelRCReturn(rc, rc);
126 pVM->fRecompileUser = !fEnabled;
127
128 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
129 AssertLogRelRCReturn(rc, rc);
130 pVM->fRecompileSupervisor = !fEnabled;
131
132#ifdef VBOX_WITH_RAW_RING1
133 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
134 AssertLogRelRCReturn(rc, rc);
135#else
136 pVM->fRawRing1Enabled = false; /* Disabled by default. */
137#endif
138
139 rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
140 AssertLogRelRCReturn(rc, rc);
141
142 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
143 AssertLogRelRCReturn(rc, rc);
144 pVM->em.s.fGuruOnTripleFault = !fEnabled;
145 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
146 {
147 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
148 pVM->em.s.fGuruOnTripleFault = true;
149 }
150
151 LogRel(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
152 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
153
154#ifdef VBOX_WITH_REM
155 /*
156 * Initialize the REM critical section.
157 */
158 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
159 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
160 AssertRCReturn(rc, rc);
161#endif
162
163 /*
164 * Saved state.
165 */
166 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
167 NULL, NULL, NULL,
168 NULL, emR3Save, NULL,
169 NULL, emR3Load, NULL);
170 if (RT_FAILURE(rc))
171 return rc;
172
173 for (VMCPUID i = 0; i < pVM->cCpus; i++)
174 {
175 PVMCPU pVCpu = &pVM->aCpus[i];
176
177 pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
178 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
179 pVCpu->em.s.fForceRAW = false;
180
181 pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
182#ifdef VBOX_WITH_RAW_MODE
183 if (VM_IS_RAW_MODE_ENABLED(pVM))
184 {
185 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
186 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
187 }
188#endif
189
190 /* Force reset of the time slice. */
191 pVCpu->em.s.u64TimeSliceStart = 0;
192
193# define EM_REG_COUNTER(a, b, c) \
194 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
195 AssertRC(rc);
196
197# define EM_REG_COUNTER_USED(a, b, c) \
198 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
199 AssertRC(rc);
200
201# define EM_REG_PROFILE(a, b, c) \
202 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
203 AssertRC(rc);
204
205# define EM_REG_PROFILE_ADV(a, b, c) \
206 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
207 AssertRC(rc);
208
209 /*
210 * Statistics.
211 */
212#ifdef VBOX_WITH_STATISTICS
213 PEMSTATS pStats;
214 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
215 if (RT_FAILURE(rc))
216 return rc;
217
218 pVCpu->em.s.pStatsR3 = pStats;
219 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
220 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
221
222 EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
223 EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");
224
225 EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
226 EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");
227
228 EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
229 EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
230 EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
231 EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
232 EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
233 EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
234 EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
235 EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
236 EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
237 EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
238 EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
239 EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
240 EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
241 EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
242 EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
243 EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
244 EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
245 EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
246 EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
247 EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
248 EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
249 EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
250 EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
251 EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
252 EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
253 EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
254 EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
255 EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
256 EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
257 EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
258 EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
259 EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
260 EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
261 EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
262 EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
263 EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
264 EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
265 EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
266 EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
267 EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
268 EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
269 EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
270 EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
271 EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
272 EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
273 EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
274 EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
275 EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
276 EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
277 EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
278 EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
279 EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
280 EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
281 EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
282 EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
283 EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
284 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
285 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
286 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
287 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
288 EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
289 EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
290 EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
291 EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
292 EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
293 EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
294 EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
295 EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
296 EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
297 EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
298 EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
299 EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
300 EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
301 EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
302
303 EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
304 EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");
305
306 EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
307 EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
308 EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
309 EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
310 EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
311 EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
312 EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
313 EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
314 EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
315 EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
316 EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
317 EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
318 EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
319 EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
320 EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
321 EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
322 EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
323 EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
324 EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
325 EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
326 EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
327 EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
328 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
329 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
330 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
331 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
332 EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
333 EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
334 EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
335 EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
336 EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
337 EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
338 EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
339 EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
340 EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
341 EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
342 EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
343 EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
344 EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
345 EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
346 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
347 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
348 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
349 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
350 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
351 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
352 EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
353 EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
354 EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
355 EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
356 EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
357 EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
358
359 EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
360 EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
361 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
362 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
363 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
364 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
365 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
366 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
367 EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
368 EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
369 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
370 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
371 EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
372 EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
373 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
374 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
375 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
376 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
377 EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
378 EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
379 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
380 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
381 EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
382 EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
383 EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
384 EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
385 EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
386 EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
387
388 EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
389 EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
390 EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");
391 EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of a prefix.");
392
393 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
394 EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions handed to IEM in ring-3.");
395 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
396 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
397 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
398 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
399 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
400 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
401 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
402 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
403 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
404 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
405 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
406 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
407 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
408 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
409 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
410 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
411 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
412 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
413 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
414 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
415 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
416 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
417 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
418 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
419
420 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
421 pVCpu->em.s.pCliStatTree = 0;
422
423 /* these should be considered for release statistics. */
424 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
425 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
426 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%d/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
427 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%d/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
428 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%d/EM/HMExecuteCalled", "Number of times enmR3HMExecute is called.");
429 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
430 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
431 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%d/EM/NEMEnter", "Profiling NEM entry overhead.");
432#endif /* VBOX_WITH_STATISTICS */
433 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%d/EM/NEMExec", "Profiling NEM execution.");
434 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%d/EM/NEMExecuteCalled", "Number of times enmR3NEMExecute is called.");
435#ifdef VBOX_WITH_STATISTICS
436 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
437 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
438 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
439 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
440 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
441 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
442#endif /* VBOX_WITH_STATISTICS */
443
444 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
445 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
446 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
447 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
448 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
449
450 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
451
452 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
453 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", i);
454 AssertRC(rc);
455
456 /* History record statistics */
457 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
458 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", i);
459 AssertRC(rc);
460
461 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
462 {
463 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
464 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", i, iStep);
465 AssertRC(rc);
466 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
467 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", i, iStep);
468 AssertRC(rc);
469 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecReplaced[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
470 "Number of replacements at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacements", i, iStep);
471 AssertRC(rc);
472 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
473 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", i, iStep);
474 AssertRC(rc);
475 }
476 }
477
478 emR3InitDbg(pVM);
479 return VINF_SUCCESS;
480}
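/* The counters and profiles registered above are visible through the STAM
 * framework at runtime; a typical inspection (hypothetical VM name) would be:
 *
 *      VBoxManage debugvm "MyVM" statistics --pattern "/EM/CPU0/ExitHashing/*"
 *
 * which dumps the exit-history hashing statistics registered in the loop
 * just above.
 */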
481
482
483/**
484 * Applies relocations to data and code managed by this
485 * component. This function will be called at init and
486 * whenever the VMM needs to relocate itself inside the GC.
487 *
488 * @param pVM The cross context VM structure.
489 */
490VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
491{
492 LogFlow(("EMR3Relocate\n"));
493 for (VMCPUID i = 0; i < pVM->cCpus; i++)
494 {
495 PVMCPU pVCpu = &pVM->aCpus[i];
496 if (pVCpu->em.s.pStatsR3)
497 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
498 }
499}
500
501
502/**
503 * Reset the EM state for a CPU.
504 *
505 * Called by EMR3Reset and hot plugging.
506 *
507 * @param pVCpu The cross context virtual CPU structure.
508 */
509VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
510{
511 /* Reset scheduling state. */
512 pVCpu->em.s.fForceRAW = false;
513 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
514
515 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
516 out of the HALTED state here so that enmPrevState doesn't end up as
517 HALTED when EMR3Execute returns. */
518 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
519 {
520 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
521 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
522 }
523}
524
525
526/**
527 * Reset notification.
528 *
529 * @param pVM The cross context VM structure.
530 */
531VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
532{
533 Log(("EMR3Reset: \n"));
534 for (VMCPUID i = 0; i < pVM->cCpus; i++)
535 EMR3ResetCpu(&pVM->aCpus[i]);
536}
537
538
539/**
540 * Terminates the EM.
541 *
542 * Termination means cleaning up and freeing all resources;
543 * the VM itself is at this point powered off or suspended.
544 *
545 * @returns VBox status code.
546 * @param pVM The cross context VM structure.
547 */
548VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
549{
550 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
551
552#ifdef VBOX_WITH_REM
553 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
554#else
555 RT_NOREF(pVM);
556#endif
557 return VINF_SUCCESS;
558}
559
560
561/**
562 * Execute state save operation.
563 *
564 * @returns VBox status code.
565 * @param pVM The cross context VM structure.
566 * @param pSSM SSM operation handle.
567 */
568static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
569{
570 for (VMCPUID i = 0; i < pVM->cCpus; i++)
571 {
572 PVMCPU pVCpu = &pVM->aCpus[i];
573
574 SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
575
576 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
577 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
578 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
579
580 /* Save mwait state. */
581 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
582 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
583 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
584 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
585 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
586 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
587 AssertRCReturn(rc, rc);
588 }
589 return VINF_SUCCESS;
590}
591
592
593/**
594 * Execute state load operation.
595 *
596 * @returns VBox status code.
597 * @param pVM The cross context VM structure.
598 * @param pSSM SSM operation handle.
599 * @param uVersion Data layout version.
600 * @param uPass The data pass.
601 */
602static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
603{
604 /*
605 * Validate version.
606 */
607 if ( uVersion > EM_SAVED_STATE_VERSION
608 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
609 {
610 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
611 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
612 }
613 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
614
615 /*
616 * Load the saved state.
617 */
618 for (VMCPUID i = 0; i < pVM->cCpus; i++)
619 {
620 PVMCPU pVCpu = &pVM->aCpus[i];
621
622 int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
623 if (RT_FAILURE(rc))
624 pVCpu->em.s.fForceRAW = false;
625 AssertRCReturn(rc, rc);
626
627 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
628 {
629 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
630 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
631 AssertRCReturn(rc, rc);
632 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
633
634 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
635 }
636 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
637 {
638 /* Load mwait state. */
639 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
640 AssertRCReturn(rc, rc);
641 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
642 AssertRCReturn(rc, rc);
643 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
644 AssertRCReturn(rc, rc);
645 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
646 AssertRCReturn(rc, rc);
647 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
648 AssertRCReturn(rc, rc);
649 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
650 AssertRCReturn(rc, rc);
651 }
652
653 Assert(!pVCpu->em.s.pCliStatTree);
654 }
655 return VINF_SUCCESS;
656}
657
658
659/**
660 * Argument packet for emR3SetExecutionPolicy.
661 */
662struct EMR3SETEXECPOLICYARGS
663{
664 EMEXECPOLICY enmPolicy;
665 bool fEnforce;
666};
667
668
669/**
670 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
671 */
672static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
673{
674 /*
675 * Only the first CPU changes the variables.
676 */
677 if (pVCpu->idCpu == 0)
678 {
679 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
680 switch (pArgs->enmPolicy)
681 {
682 case EMEXECPOLICY_RECOMPILE_RING0:
683 pVM->fRecompileSupervisor = pArgs->fEnforce;
684 break;
685 case EMEXECPOLICY_RECOMPILE_RING3:
686 pVM->fRecompileUser = pArgs->fEnforce;
687 break;
688 case EMEXECPOLICY_IEM_ALL:
689 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
690 break;
691 default:
692 AssertFailedReturn(VERR_INVALID_PARAMETER);
693 }
694 LogRel(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
695 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
696 }
697
698 /*
699 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
700 */
701 return pVCpu->em.s.enmState == EMSTATE_RAW
702 || pVCpu->em.s.enmState == EMSTATE_HM
703 || pVCpu->em.s.enmState == EMSTATE_NEM
704 || pVCpu->em.s.enmState == EMSTATE_IEM
705 || pVCpu->em.s.enmState == EMSTATE_REM
706 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
707 ? VINF_EM_RESCHEDULE
708 : VINF_SUCCESS;
709}
710
711
712/**
713 * Changes an execution scheduling policy parameter.
714 *
715 * This is used to enable or disable raw-mode / hardware-virtualization
716 * execution of user and supervisor code.
717 *
718 * @returns VINF_SUCCESS on success.
719 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
720 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
721 *
722 * @param pUVM The user mode VM handle.
723 * @param enmPolicy The scheduling policy to change.
724 * @param fEnforce Whether to enforce the policy or not.
725 */
726VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
727{
728 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
729 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
730 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
731
732 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
733 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
734}
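/* A minimal usage sketch (assuming a valid user mode VM handle pUVM; error
 * handling condensed):
 *
 *      int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *      AssertLogRelRC(rc);
 *
 * The rendezvous above runs on every EMT, so each CPU either confirms the
 * change or returns VINF_EM_RESCHEDULE to drop out of its current execution
 * mode. EMR3QueryExecutionPolicy() below reads the flags back without a
 * rendezvous.
 */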
735
736
737/**
738 * Queries an execution scheduling policy parameter.
739 *
740 * @returns VBox status code
741 * @param pUVM The user mode VM handle.
742 * @param enmPolicy The scheduling policy to query.
743 * @param pfEnforced Where to return the current value.
744 */
745VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
746{
747 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
748 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
749 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
750 PVM pVM = pUVM->pVM;
751 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
752
753 /* No need to bother EMTs with a query. */
754 switch (enmPolicy)
755 {
756 case EMEXECPOLICY_RECOMPILE_RING0:
757 *pfEnforced = pVM->fRecompileSupervisor;
758 break;
759 case EMEXECPOLICY_RECOMPILE_RING3:
760 *pfEnforced = pVM->fRecompileUser;
761 break;
762 case EMEXECPOLICY_IEM_ALL:
763 *pfEnforced = pVM->em.s.fIemExecutesAll;
764 break;
765 default:
766 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
767 }
768
769 return VINF_SUCCESS;
770}
771
772
773/**
774 * Queries the main execution engine of the VM.
775 *
776 * @returns VBox status code
777 * @param pUVM The user mode VM handle.
778 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
779 */
780VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
781{
782 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
783 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
784
785 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
786 PVM pVM = pUVM->pVM;
787 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
788
789 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
790 return VINF_SUCCESS;
791}
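/* Caller-side sketch (the VM_EXEC_ENGINE_XXX values come from vm.h; the
 * logging is illustrative):
 *
 *      uint8_t bEngine = VM_EXEC_ENGINE_NOT_SET;
 *      int rc = EMR3QueryMainExecutionEngine(pUVM, &bEngine);
 *      if (RT_SUCCESS(rc))
 *          LogRel(("Main execution engine: %u (%s)\n", bEngine,
 *                  bEngine == VM_EXEC_ENGINE_HW_VIRT     ? "HM"
 *                  : bEngine == VM_EXEC_ENGINE_NATIVE_API ? "NEM" : "other"));
 */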
792
793
794/**
795 * Raise a fatal error.
796 *
797 * Safely terminate the VM with full state report and stuff. This function
798 * will naturally never return.
799 *
800 * @param pVCpu The cross context virtual CPU structure.
801 * @param rc VBox status code.
802 */
803VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
804{
805 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
806 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
807}
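/* The matching catch site: EMR3ExecuteVM() arms the jump buffer before
 * entering its main loop, roughly like this (a sketch; see the actual
 * setjmp use further down in this file):
 *
 *      int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
 *      if (rc == 0)
 *          ... run the normal execution loop ...
 *      else
 *          ... rc is the status passed to EMR3FatalError() ...
 */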
808
809
810#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
811/**
812 * Gets the EM state name.
813 *
814 * @returns Pointer to a read-only state name.
815 * @param enmState The state.
816 */
817static const char *emR3GetStateName(EMSTATE enmState)
818{
819 switch (enmState)
820 {
821 case EMSTATE_NONE: return "EMSTATE_NONE";
822 case EMSTATE_RAW: return "EMSTATE_RAW";
823 case EMSTATE_HM: return "EMSTATE_HM";
824 case EMSTATE_IEM: return "EMSTATE_IEM";
825 case EMSTATE_REM: return "EMSTATE_REM";
826 case EMSTATE_HALTED: return "EMSTATE_HALTED";
827 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
828 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
829 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
830 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
831 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
832 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
833 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
834 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
835 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
836 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
837 case EMSTATE_NEM: return "EMSTATE_NEM";
838 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
839 default: return "Unknown!";
840 }
841}
842#endif /* LOG_ENABLED || VBOX_STRICT */
843
844
845/**
846 * Handle pending ring-3 I/O port write.
847 *
848 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
849 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
850 *
851 * @returns Strict VBox status code.
852 * @param pVM The cross context VM structure.
853 * @param pVCpu The cross context virtual CPU structure.
854 */
855VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
856{
857 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
858
859 /* Get and clear the pending data. */
860 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
861 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
862 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
863 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
864 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
865
866 /* Assert sanity. */
867 switch (cbValue)
868 {
869 case 1: Assert(!(uValue & UINT32_C(0xffffff00))); break;
870 case 2: Assert(!(uValue & UINT32_C(0xffff0000))); break;
871 case 4: break;
872 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
873 }
874 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
875
876 /* Do the work.*/
877 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
878 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
879 if (IOM_SUCCESS(rcStrict))
880 {
881 pVCpu->cpum.GstCtx.rip += cbInstr;
882 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
883 }
884 return rcStrict;
885}
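/* For context: the ring-0/raw-mode producer records the access and forces a
 * return to ring-3 roughly like this (a sketch around the real
 * EMRZSetPendingIoPortWrite API; see EMAll.cpp for the actual code):
 *
 *      return EMRZSetPendingIoPortWrite(pVCpu, uPort, cbInstr, cbValue, uValue);
 *
 * That returns VINF_EM_PENDING_R3_IOPORT_WRITE, which lands the EMT in
 * emR3ExecutePendingIoPortWrite() above. The read path is symmetric:
 * EMRZSetPendingIoPortRead() pairs with emR3ExecutePendingIoPortRead() below.
 */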
886
887
888/**
889 * Handle pending ring-3 I/O port read.
890 *
891 * This is in response to a VINF_EM_PENDING_R3_IOPORT_READ status code returned
892 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
893 *
894 * @returns Strict VBox status code.
895 * @param pVM The cross context VM structure.
896 * @param pVCpu The cross context virtual CPU structure.
897 */
898VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
899{
900 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
901
902 /* Get and clear the pending data. */
903 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
904 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
905 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
906 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
907
908 /* Assert sanity. */
909 switch (cbValue)
910 {
911 case 1: break;
912 case 2: break;
913 case 4: break;
914 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
915 }
916 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
917 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
918
919 /* Do the work.*/
920 uint32_t uValue = 0;
921 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
922 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
923 if (IOM_SUCCESS(rcStrict))
924 {
925 if (cbValue == 4)
926 pVCpu->cpum.GstCtx.rax = uValue;
927 else if (cbValue == 2)
928 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
929 else
930 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
931 pVCpu->cpum.GstCtx.rip += cbInstr;
932 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
933 }
934 return rcStrict;
935}
936
937
938/**
939 * Debug loop.
940 *
941 * @returns VBox status code for EM.
942 * @param pVM The cross context VM structure.
943 * @param pVCpu The cross context virtual CPU structure.
944 * @param rc Current EM VBox status code.
945 */
946static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
947{
948 for (;;)
949 {
950 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
951 const VBOXSTRICTRC rcLast = rc;
952
953 /*
954 * Debug related RC.
955 */
956 switch (VBOXSTRICTRC_VAL(rc))
957 {
958 /*
959 * Single step an instruction.
960 */
961 case VINF_EM_DBG_STEP:
962 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
963 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
964 || pVCpu->em.s.fForceRAW /* paranoia */)
965#ifdef VBOX_WITH_RAW_MODE
966 rc = emR3RawStep(pVM, pVCpu);
967#else
968 AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
969#endif
970 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
971 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
972 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
973 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
974#ifdef VBOX_WITH_REM
975 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
976 rc = emR3RemStep(pVM, pVCpu);
977#endif
978 else
979 {
980 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
981 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
982 rc = VINF_EM_DBG_STEPPED;
983 }
984 break;
985
986 /*
987 * Simple events: stepped, breakpoint, stop/assertion.
988 */
989 case VINF_EM_DBG_STEPPED:
990 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
991 break;
992
993 case VINF_EM_DBG_BREAKPOINT:
994 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
995 break;
996
997 case VINF_EM_DBG_STOP:
998 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
999 break;
1000
1001 case VINF_EM_DBG_EVENT:
1002 rc = DBGFR3EventHandlePending(pVM, pVCpu);
1003 break;
1004
1005 case VINF_EM_DBG_HYPER_STEPPED:
1006 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
1007 break;
1008
1009 case VINF_EM_DBG_HYPER_BREAKPOINT:
1010 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
1011 break;
1012
1013 case VINF_EM_DBG_HYPER_ASSERTION:
1014 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
1015 RTLogFlush(NULL);
1016 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
1017 break;
1018
1019 /*
1020 * Guru meditation.
1021 */
1022 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
1023 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
1024 break;
1025 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
1026 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
1027 break;
1028 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
1029 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
1030 break;
1031
1032 default: /** @todo don't use default for guru, but make special error codes! */
1033 {
1034 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
1035 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
1036 break;
1037 }
1038 }
1039
1040 /*
1041 * Process the result.
1042 */
1043 switch (VBOXSTRICTRC_VAL(rc))
1044 {
1045 /*
1046 * Continue the debugging loop.
1047 */
1048 case VINF_EM_DBG_STEP:
1049 case VINF_EM_DBG_STOP:
1050 case VINF_EM_DBG_EVENT:
1051 case VINF_EM_DBG_STEPPED:
1052 case VINF_EM_DBG_BREAKPOINT:
1053 case VINF_EM_DBG_HYPER_STEPPED:
1054 case VINF_EM_DBG_HYPER_BREAKPOINT:
1055 case VINF_EM_DBG_HYPER_ASSERTION:
1056 break;
1057
1058 /*
1059 * Resuming execution (in some form) has to be done here if we got
1060 * a hypervisor debug event.
1061 */
1062 case VINF_SUCCESS:
1063 case VINF_EM_RESUME:
1064 case VINF_EM_SUSPEND:
1065 case VINF_EM_RESCHEDULE:
1066 case VINF_EM_RESCHEDULE_RAW:
1067 case VINF_EM_RESCHEDULE_REM:
1068 case VINF_EM_HALT:
1069 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
1070 {
1071#ifdef VBOX_WITH_RAW_MODE
1072 rc = emR3RawResumeHyper(pVM, pVCpu);
1073 if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
1074 continue;
1075#else
1076 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
1077#endif
1078 }
1079 if (rc == VINF_SUCCESS)
1080 rc = VINF_EM_RESCHEDULE;
1081 return rc;
1082
1083 /*
1084 * The debugger isn't attached.
1085 * We'll simply turn the thing off since that's the easiest thing to do.
1086 */
1087 case VERR_DBGF_NOT_ATTACHED:
1088 switch (VBOXSTRICTRC_VAL(rcLast))
1089 {
1090 case VINF_EM_DBG_HYPER_STEPPED:
1091 case VINF_EM_DBG_HYPER_BREAKPOINT:
1092 case VINF_EM_DBG_HYPER_ASSERTION:
1093 case VERR_TRPM_PANIC:
1094 case VERR_TRPM_DONT_PANIC:
1095 case VERR_VMM_RING0_ASSERTION:
1096 case VERR_VMM_HYPER_CR3_MISMATCH:
1097 case VERR_VMM_RING3_CALL_DISABLED:
1098 return rcLast;
1099 }
1100 return VINF_EM_OFF;
1101
1102 /*
1103 * Status codes terminating the VM in one sense or another.
1104 */
1105 case VINF_EM_TERMINATE:
1106 case VINF_EM_OFF:
1107 case VINF_EM_RESET:
1108 case VINF_EM_NO_MEMORY:
1109 case VINF_EM_RAW_STALE_SELECTOR:
1110 case VINF_EM_RAW_IRET_TRAP:
1111 case VERR_TRPM_PANIC:
1112 case VERR_TRPM_DONT_PANIC:
1113 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
1114 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
1115 case VERR_VMM_RING0_ASSERTION:
1116 case VERR_VMM_HYPER_CR3_MISMATCH:
1117 case VERR_VMM_RING3_CALL_DISABLED:
1118 case VERR_INTERNAL_ERROR:
1119 case VERR_INTERNAL_ERROR_2:
1120 case VERR_INTERNAL_ERROR_3:
1121 case VERR_INTERNAL_ERROR_4:
1122 case VERR_INTERNAL_ERROR_5:
1123 case VERR_IPE_UNEXPECTED_STATUS:
1124 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1125 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1126 return rc;
1127
1128 /*
1129 * The rest is unexpected, and will keep us here.
1130 */
1131 default:
1132 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1133 break;
1134 }
1135 } /* debug forever */
1136}
1137
1138
1139#if defined(VBOX_WITH_REM) || defined(DEBUG)
1140/**
1141 * Steps recompiled code.
1142 *
1143 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1144 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1145 *
1146 * @param pVM The cross context VM structure.
1147 * @param pVCpu The cross context virtual CPU structure.
1148 */
1149static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1150{
1151 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1152
1153# ifdef VBOX_WITH_REM
1154 EMRemLock(pVM);
1155
1156 /*
1157 * Switch to REM, step instruction, switch back.
1158 */
1159 int rc = REMR3State(pVM, pVCpu);
1160 if (RT_SUCCESS(rc))
1161 {
1162 rc = REMR3Step(pVM, pVCpu);
1163 REMR3StateBack(pVM, pVCpu);
1164 }
1165 EMRemUnlock(pVM);
1166
1167# else
1168 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1169# endif
1170
1171 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1172 return rc;
1173}
1174#endif /* VBOX_WITH_REM || DEBUG */
1175
1176
1177#ifdef VBOX_WITH_REM
1178/**
1179 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
1180 * critical section.
1181 *
1182 * @returns false - new fInREMState value.
1183 * @param pVM The cross context VM structure.
1184 * @param pVCpu The cross context virtual CPU structure.
1185 */
1186DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1187{
1188 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1189 REMR3StateBack(pVM, pVCpu);
1190 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1191
1192 EMRemUnlock(pVM);
1193 return false;
1194}
1195#endif
1196
1197
1198/**
1199 * Executes recompiled code.
1200 *
1201 * This function contains the recompiler version of the inner
1202 * execution loop (the outer loop being in EMR3ExecuteVM()).
1203 *
1204 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1205 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1206 *
1207 * @param pVM The cross context VM structure.
1208 * @param pVCpu The cross context virtual CPU structure.
1209 * @param pfFFDone Where to store an indicator telling whether or not
1210 * FFs were done before returning.
1211 *
1212 */
1213static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1214{
1215#ifdef LOG_ENABLED
1216 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1217 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1218
1219 if (pCtx->eflags.Bits.u1VM)
1220 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
1221 else
1222 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
1223#endif
1224 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1225
1226#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1227 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1228 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1229 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1230#endif
1231
1232 /*
1233 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1234 * or the REM suggests raw-mode execution.
1235 */
1236 *pfFFDone = false;
1237#ifdef VBOX_WITH_REM
1238 bool fInREMState = false;
1239#else
1240 uint32_t cLoops = 0;
1241#endif
1242 int rc = VINF_SUCCESS;
1243 for (;;)
1244 {
1245#ifdef VBOX_WITH_REM
1246 /*
1247 * Lock REM and update the state if not already in sync.
1248 *
1249 * Note! Big lock, but you are not supposed to own any lock when
1250 * coming in here.
1251 */
1252 if (!fInREMState)
1253 {
1254 EMRemLock(pVM);
1255 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1256
1257 /* Flush the recompiler translation blocks if the VCPU has changed,
1258 and force a full CPU state resync. */
1259 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1260 {
1261 REMFlushTBs(pVM);
1262 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1263 }
1264 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1265
1266 rc = REMR3State(pVM, pVCpu);
1267
1268 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1269 if (RT_FAILURE(rc))
1270 break;
1271 fInREMState = true;
1272
1273 /*
1274 * We might have missed the raising of VMREQ, TIMER and some other
1275 * important FFs while we were busy switching the state. So, check again.
1276 */
1277 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1278 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1279 {
1280 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1281 goto l_REMDoForcedActions;
1282 }
1283 }
1284#endif
1285
1286 /*
1287 * Execute REM.
1288 */
1289 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1290 {
1291 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1292#ifdef VBOX_WITH_REM
1293 rc = REMR3Run(pVM, pVCpu);
1294#else
1295 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
1296#endif
1297 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1298 }
1299 else
1300 {
1301 /* Give up this time slice; virtual time continues */
1302 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1303 RTThreadSleep(5);
1304 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1305 rc = VINF_SUCCESS;
1306 }
1307
1308 /*
1309 * Deal with high priority post execution FFs before doing anything
1310 * else. Sync back the state and leave the lock to be on the safe side.
1311 */
1312 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1313 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1314 {
1315#ifdef VBOX_WITH_REM
1316 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1317#endif
1318 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1319 }
1320
1321 /*
1322 * Process the returned status code.
1323 */
1324 if (rc != VINF_SUCCESS)
1325 {
1326 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1327 break;
1328 if (rc != VINF_REM_INTERRUPED_FF)
1329 {
1330#ifndef VBOX_WITH_REM
1331 /* Try to dodge unimplemented IEM trouble by rescheduling. */
1332 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1333 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1334 {
1335 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1336 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1337 {
1338 rc = VINF_EM_RESCHEDULE;
1339 break;
1340 }
1341 }
1342#endif
1343
1344 /*
1345 * Anything which is not known to us means an internal error
1346 * and the termination of the VM!
1347 */
1348 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1349 break;
1350 }
1351 }
1352
1353
1354 /*
1355 * Check and execute forced actions.
1356 *
1357 * Sync back the VM state and leave the lock before calling any of
1358 * these, you never know what's going to happen here.
1359 */
1360#ifdef VBOX_HIGH_RES_TIMERS_HACK
1361 TMTimerPollVoid(pVM, pVCpu);
1362#endif
1363 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1364 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1365 || VMCPU_FF_IS_PENDING(pVCpu,
1366 VMCPU_FF_ALL_REM_MASK
1367 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1368 {
1369#ifdef VBOX_WITH_REM
1370l_REMDoForcedActions:
1371 if (fInREMState)
1372 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1373#endif
1374 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1375 rc = emR3ForcedActions(pVM, pVCpu, rc);
1376 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1377 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1378 if ( rc != VINF_SUCCESS
1379 && rc != VINF_EM_RESCHEDULE_REM)
1380 {
1381 *pfFFDone = true;
1382 break;
1383 }
1384 }
1385
1386#ifndef VBOX_WITH_REM
1387 /*
1388 * Every so often, check whether we can get back to fast execution mode.
1389 */
1390 if (!(++cLoops & 7))
1391 {
1392 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1393 if ( enmCheck != EMSTATE_REM
1394 && enmCheck != EMSTATE_IEM_THEN_REM)
1395 return VINF_EM_RESCHEDULE;
1396 }
1397#endif
1398
1399 } /* The Inner Loop, recompiled execution mode version. */
1400
1401
1402#ifdef VBOX_WITH_REM
1403 /*
1404 * Returning. Sync back the VM state if required.
1405 */
1406 if (fInREMState)
1407 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1408#endif
1409
1410 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1411 return rc;
1412}
1413
1414
1415#ifdef DEBUG
1416
1417int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1418{
1419 EMSTATE enmOldState = pVCpu->em.s.enmState;
1420
1421 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1422
1423 Log(("Single step BEGIN:\n"));
1424 for (uint32_t i = 0; i < cIterations; i++)
1425 {
1426 DBGFR3PrgStep(pVCpu);
1427 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1428 emR3RemStep(pVM, pVCpu);
1429 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1430 break;
1431 }
1432 Log(("Single step END:\n"));
1433 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1434 pVCpu->em.s.enmState = enmOldState;
1435 return VINF_EM_RESCHEDULE;
1436}
1437
1438#endif /* DEBUG */
1439
1440
1441/**
1442 * Try to execute the problematic code in IEM first, then fall back on REM if there
1443 * is too much of it or if IEM doesn't implement something.
1444 *
1445 * @returns Strict VBox status code from IEMExecLots.
1446 * @param pVM The cross context VM structure.
1447 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1448 * @param pfFFDone Force flags done indicator.
1449 *
1450 * @thread EMT(pVCpu)
1451 */
1452static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1453{
1454 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1455 *pfFFDone = false;
1456
1457 /*
1458 * Execute in IEM for a while.
1459 */
1460 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1461 {
1462 uint32_t cInstructions;
1463 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
1464 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1465 if (rcStrict != VINF_SUCCESS)
1466 {
1467 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1468 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1469 break;
1470
1471 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1472 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1473 return rcStrict;
1474 }
1475
1476 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1477 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1478 {
1479 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1480 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1481 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1482 pVCpu->em.s.enmState = enmNewState;
1483 return VINF_SUCCESS;
1484 }
1485
1486 /*
1487 * Check for pending actions.
1488 */
1489 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1490 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1491 return VINF_SUCCESS;
1492 }
1493
1494 /*
1495 * Switch to REM.
1496 */
1497 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1498 pVCpu->em.s.enmState = EMSTATE_REM;
1499 return VINF_SUCCESS;
1500}
1501
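/*
 * To illustrate the hand-over above (instruction count invented for
 * illustration): should IEM stumble over an unimplemented aspect after,
 * say, 37 instructions, the loop breaks and we switch to EMSTATE_REM;
 * should 1024 instructions complete without incident or rescheduling, we
 * likewise hand over to REM rather than keep paying the interpreter
 * overhead.
 */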
1502
1503/**
1504 * Decides whether to execute RAW, hardware accelerated (HM/NEM), IEM or REM.
1505 *
1506 * @returns new EM state
1507 * @param pVM The cross context VM structure.
1508 * @param pVCpu The cross context virtual CPU structure.
1509 * @param pCtx Pointer to the guest CPU context.
1510 */
1511EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1512{
1513 /*
1514 * When forcing raw-mode execution, things are simple.
1515 */
1516 if (pVCpu->em.s.fForceRAW)
1517 return EMSTATE_RAW;
1518
1519 /*
1520 * We stay in the wait for SIPI state unless explicitly told otherwise.
1521 */
1522 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1523 return EMSTATE_WAIT_SIPI;
1524
1525 /*
1526 * Execute everything in IEM?
1527 */
1528 if (pVM->em.s.fIemExecutesAll)
1529 return EMSTATE_IEM;
1530
1531 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1532 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1533 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1534
1535 X86EFLAGS EFlags = pCtx->eflags;
1536 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1537 {
1538 if (EMIsHwVirtExecutionEnabled(pVM))
1539 {
1540 if (VM_IS_HM_ENABLED(pVM))
1541 {
1542 if (HMR3CanExecuteGuest(pVM, pCtx))
1543 return EMSTATE_HM;
1544 }
1545 else if (NEMR3CanExecuteGuest(pVM, pVCpu, pCtx))
1546 return EMSTATE_NEM;
1547
1548 /*
1549 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1550 * turns off monitoring features essential for raw mode!
1551 */
1552 return EMSTATE_IEM_THEN_REM;
1553 }
1554 }
1555
1556 /*
1557 * Standard raw-mode:
1558 *
1559 * Here we only support 16 and 32 bit protected mode ring-3 code with no I/O privileges,
1560 * or 32-bit protected mode ring-0 code.
1561 *
1562 * The tests are ordered by the likelihood of being true during normal execution.
1563 */
1564 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1565 {
1566 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1567 return EMSTATE_REM;
1568 }
1569
1570# ifndef VBOX_RAW_V86
1571 if (EFlags.u32 & X86_EFL_VM) {
1572 Log2(("raw mode refused: VM_MASK\n"));
1573 return EMSTATE_REM;
1574 }
1575# endif
1576
1577 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1578 uint32_t u32CR0 = pCtx->cr0;
1579 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1580 {
1581 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1582 return EMSTATE_REM;
1583 }
1584
1585 if (pCtx->cr4 & X86_CR4_PAE)
1586 {
1587 uint32_t u32Dummy, u32Features;
1588
1589 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1590 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1591 return EMSTATE_REM;
1592 }
1593
1594 unsigned uSS = pCtx->ss.Sel;
1595 if ( pCtx->eflags.Bits.u1VM
1596 || (uSS & X86_SEL_RPL) == 3)
1597 {
1598 if (!EMIsRawRing3Enabled(pVM))
1599 return EMSTATE_REM;
1600
1601 if (!(EFlags.u32 & X86_EFL_IF))
1602 {
1603 Log2(("raw mode refused: IF (RawR3)\n"));
1604 return EMSTATE_REM;
1605 }
1606
1607 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1608 {
1609 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1610 return EMSTATE_REM;
1611 }
1612 }
1613 else
1614 {
1615 if (!EMIsRawRing0Enabled(pVM))
1616 return EMSTATE_REM;
1617
1618 if (EMIsRawRing1Enabled(pVM))
1619 {
1620 /* Only ring 0 and 1 supervisor code. */
1621 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1622 {
1623 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1624 return EMSTATE_REM;
1625 }
1626 }
1627 /* Only ring 0 supervisor code. */
1628 else if ((uSS & X86_SEL_RPL) != 0)
1629 {
1630 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1631 return EMSTATE_REM;
1632 }
1633
1634 // Let's start with pure 32-bit ring-0 code first
1635 /** @todo What's pure 32-bit mode? flat? */
1636 if ( !(pCtx->ss.Attr.n.u1DefBig)
1637 || !(pCtx->cs.Attr.n.u1DefBig))
1638 {
1639 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1640 return EMSTATE_REM;
1641 }
1642
1643 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1644 if (!(u32CR0 & X86_CR0_WP))
1645 {
1646 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1647 return EMSTATE_REM;
1648 }
1649
1650# ifdef VBOX_WITH_RAW_MODE
1651 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1652 {
1653 Log2(("raw r0 mode forced: patch code\n"));
1654# ifdef VBOX_WITH_SAFE_STR
1655 Assert(pCtx->tr.Sel);
1656# endif
1657 return EMSTATE_RAW;
1658 }
1659# endif /* VBOX_WITH_RAW_MODE */
1660
1661# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1662 if (!(EFlags.u32 & X86_EFL_IF))
1663 {
1664 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1665 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1666 return EMSTATE_REM;
1667 }
1668# endif
1669
1670# ifndef VBOX_WITH_RAW_RING1
1671 /** @todo still necessary??? */
1672 if (EFlags.Bits.u2IOPL != 0)
1673 {
1674 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1675 return EMSTATE_REM;
1676 }
1677# endif
1678 }
1679
1680 /*
1681 * Stale hidden selectors mean raw-mode is unsafe (being very careful).
1682 */
1683 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1684 {
1685 Log2(("raw mode refused: stale CS\n"));
1686 return EMSTATE_REM;
1687 }
1688 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1689 {
1690 Log2(("raw mode refused: stale SS\n"));
1691 return EMSTATE_REM;
1692 }
1693 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1694 {
1695 Log2(("raw mode refused: stale DS\n"));
1696 return EMSTATE_REM;
1697 }
1698 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1699 {
1700 Log2(("raw mode refused: stale ES\n"));
1701 return EMSTATE_REM;
1702 }
1703 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1704 {
1705 Log2(("raw mode refused: stale FS\n"));
1706 return EMSTATE_REM;
1707 }
1708 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1709 {
1710 Log2(("raw mode refused: stale GS\n"));
1711 return EMSTATE_REM;
1712 }
1713
1714# ifdef VBOX_WITH_SAFE_STR
1715 if (pCtx->tr.Sel == 0)
1716 {
1717 Log(("Raw mode refused -> TR=0\n"));
1718 return EMSTATE_REM;
1719 }
1720# endif
1721
1722 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1723 return EMSTATE_RAW;
1724}
1725
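/*
 * The decision ladder above, condensed (a sketch, not exhaustive):
 *      fForceRAW set                    -> EMSTATE_RAW
 *      currently EMSTATE_WAIT_SIPI      -> EMSTATE_WAIT_SIPI
 *      fIemExecutesAll set              -> EMSTATE_IEM
 *      raw-mode disabled: HM/NEM ready  -> EMSTATE_HM / EMSTATE_NEM,
 *                         otherwise     -> EMSTATE_IEM_THEN_REM
 *      raw-mode: all checks pass        -> EMSTATE_RAW, any refusal -> EMSTATE_REM
 */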
1726
1727/**
1728 * Executes all high priority post execution force actions.
1729 *
1730 * @returns Strict VBox status code. Typically @a rc, but may be upgraded to
1731 * fatal error status code.
1732 *
1733 * @param pVM The cross context VM structure.
1734 * @param pVCpu The cross context virtual CPU structure.
1735 * @param rc The current strict VBox status code rc.
1736 */
1737VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
1738{
1739 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));
1740
1741 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1742 PDMCritSectBothFF(pVCpu);
1743
1744 /* Update CR3 (Nested Paging case for HM). */
1745 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1746 {
1747 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1748 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1749 if (RT_FAILURE(rc2))
1750 return rc2;
1751 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1752 }
1753
1754 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1755 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1756 {
1757 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
1758 if (CPUMIsGuestInPAEMode(pVCpu))
1759 {
1760 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1761 AssertPtr(pPdpes);
1762
1763 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1764 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1765 }
1766 else
1767 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1768 }
1769
1770 /* IEM has pending work (typically memory write after INS instruction). */
1771 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
1772 rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);
1773
1774 /* IOM has pending work (committing an I/O or MMIO write). */
1775 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
1776 rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
1777
1778#ifdef VBOX_WITH_RAW_MODE
1779 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1780 CSAMR3DoPendingAction(pVM, pVCpu);
1781#endif
1782
1783 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1784 {
1785 if ( rc > VINF_EM_NO_MEMORY
1786 && rc <= VINF_EM_LAST)
1787 rc = VINF_EM_NO_MEMORY;
1788 }
1789
1790 return rc;
1791}
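/*
 * Typical usage is to filter the status of a just completed inner-loop
 * run, as emR3RemExecute does above:
 *      rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
 */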
1792
1793#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1794/**
1795 * Helper for emR3ForcedActions() for injecting interrupts into the
1796 * nested-guest.
1797 *
1798 * @returns VBox status code.
1799 * @param pVCpu The cross context virtual CPU structure.
1800 * @param pCtx Pointer to the nested-guest CPU context.
1801 * @param pfResched Where to store whether a reschedule is required.
1802 * @param pfInject Where to store whether an interrupt was injected (and if
1803 * a wake up is pending).
1804 */
1805static int emR3NstGstInjectIntr(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfResched, bool *pfInject)
1806{
1807 *pfResched = false;
1808 *pfInject = false;
1809 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1810 {
1811 PVM pVM = pVCpu->CTX_SUFF(pVM);
1812 Assert(pCtx->hwvirt.fGif);
1813 bool fVirtualGif = CPUMGetSvmNstGstVGif(pCtx);
1814#ifdef VBOX_WITH_RAW_MODE
1815 fVirtualGif &= !PATMIsPatchGCAddr(pVM, pCtx->eip);
1816#endif
1817 if (fVirtualGif)
1818 {
1819 if (CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
1820 {
1821 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1822 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1823 {
1824 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
1825 {
1826 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1827 if (RT_SUCCESS(rcStrict))
1828 {
1829 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1830 * doesn't intercept HLT but intercepts INTR? */
1831 *pfResched = true;
1832 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1833 if (rcStrict == VINF_SVM_VMEXIT)
1834 return VINF_SUCCESS;
1835 return VBOXSTRICTRC_VAL(rcStrict);
1836 }
1837
1838 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1839 return VINF_EM_TRIPLE_FAULT;
1840 }
1841
1842 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1843 /** @todo this really isn't nice, should properly handle this */
1844 int rc = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1845 Assert(rc != VINF_PGM_CHANGE_MODE);
1846 if (rc == VINF_SVM_VMEXIT)
1847 rc = VINF_SUCCESS;
1848 if (pVM->em.s.fIemExecutesAll && ( rc == VINF_EM_RESCHEDULE_REM
1849 || rc == VINF_EM_RESCHEDULE_HM
1850 || rc == VINF_EM_RESCHEDULE_RAW))
1851 {
1852 rc = VINF_EM_RESCHEDULE;
1853 }
1854
1855 *pfResched = true;
1856 *pfInject = true;
1857 return rc;
1858 }
1859 }
1860
1861 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1862 && CPUMCanSvmNstGstTakeVirtIntr(pVCpu, pCtx))
1863 {
1864 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
1865 {
1866 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1867 if (RT_SUCCESS(rcStrict))
1868 {
1869 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1870 * doesn't intercept HLT but intercepts VINTR? */
1871 *pfResched = true;
1872 Assert(rcStrict != VINF_PGM_CHANGE_MODE);
1873 if (rcStrict == VINF_SVM_VMEXIT)
1874 return VINF_SUCCESS;
1875 return VBOXSTRICTRC_VAL(rcStrict);
1876 }
1877
1878 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1879 return VINF_EM_TRIPLE_FAULT;
1880 }
1881
1882 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1883 uint8_t const uNstGstVector = CPUMGetSvmNstGstInterrupt(pCtx);
1884 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR vector %#x\n", uNstGstVector));
1885 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1886 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1887
1888 *pfResched = true;
1889 *pfInject = true;
1890 return VINF_EM_RESCHEDULE;
1891 }
1892 }
1893 return VINF_SUCCESS;
1894 }
1895
1896 if (CPUMIsGuestInVmxNestedHwVirtMode(pCtx))
1897 { /** @todo Nested VMX. */ }
1898
1899 /* Shouldn't really get here. */
1900 AssertMsgFailed(("Unrecognized nested hwvirt. arch!\n"));
1901 return VERR_EM_INTERNAL_ERROR;
1902}
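/*
 * Condensed sketch of the SVM decision above (simplified; the GIF and
 * can-take-interrupt checks are omitted):
 *      physical intr pending + INTR intercept   -> #VMEXIT(SVM_EXIT_INTR)
 *      physical intr pending, no intercept      -> TRPMR3InjectEvent
 *      virtual intr pending + VINTR intercept   -> #VMEXIT(SVM_EXIT_VINTR)
 *      virtual intr pending, no intercept       -> TRPMAssertTrap with the vector
 */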
1903#endif
1904
1905/**
1906 * Executes all pending forced actions.
1907 *
1908 * Forced actions can cause execution delays and execution
1909 * rescheduling. The first we deal with using action priority, so
1910 * that for instance pending timers aren't scheduled and run until
1911 * right before execution. The rescheduling we deal with using
1912 * return codes. The same goes for VM termination, only in that case
1913 * we exit everything.
1914 *
1915 * @returns VBox status code of equal or greater importance/severity than rc.
1916 * The most important ones are: VINF_EM_RESCHEDULE,
1917 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1918 *
1919 * @param pVM The cross context VM structure.
1920 * @param pVCpu The cross context virtual CPU structure.
1921 * @param rc The current rc.
1922 *
1923 */
1924int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1925{
1926 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1927#ifdef VBOX_STRICT
1928 int rcIrq = VINF_SUCCESS;
1929#endif
1930 int rc2;
1931#define UPDATE_RC() \
1932 do { \
1933 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1934 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1935 break; \
1936 if (!rc || rc2 < rc) \
1937 rc = rc2; \
1938 } while (0)
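/*
 * To illustrate UPDATE_RC: with rc = VINF_EM_RESCHEDULE already recorded,
 * a subsequent rc2 = VINF_EM_SUSPEND replaces it (numerically lower
 * VINF_EM_* codes rank higher in priority), whereas rc2 = VINF_SUCCESS
 * leaves rc untouched.
 */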
1939 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1940
1941 /*
1942 * Post execution chunk first.
1943 */
1944 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1945 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1946 {
1947 /*
1948 * EMT Rendezvous (must be serviced before termination).
1949 */
1950 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1951 {
1952 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1953 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1954 UPDATE_RC();
1955 /** @todo HACK ALERT! The following test is to make sure EM+TM
1956 * thinks the VM is stopped/reset before the next VM state change
1957 * is made. We need a better solution for this, or at least make it
1958 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1959 * VINF_EM_SUSPEND). */
1960 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1961 {
1962 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1963 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1964 return rc;
1965 }
1966 }
1967
1968 /*
1969 * State change request (cleared by vmR3SetStateLocked).
1970 */
1971 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1972 {
1973 VMSTATE enmState = VMR3GetState(pVM);
1974 switch (enmState)
1975 {
1976 case VMSTATE_FATAL_ERROR:
1977 case VMSTATE_FATAL_ERROR_LS:
1978 case VMSTATE_GURU_MEDITATION:
1979 case VMSTATE_GURU_MEDITATION_LS:
1980 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1981 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1982 return VINF_EM_SUSPEND;
1983
1984 case VMSTATE_DESTROYING:
1985 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1986 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1987 return VINF_EM_TERMINATE;
1988
1989 default:
1990 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1991 }
1992 }
1993
1994 /*
1995 * Debugger Facility polling.
1996 */
1997 if ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
1998 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
1999 {
2000 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2001 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2002 UPDATE_RC();
2003 }
2004
2005 /*
2006 * Postponed reset request.
2007 */
2008 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
2009 {
2010 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2011 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
2012 UPDATE_RC();
2013 }
2014
2015#ifdef VBOX_WITH_RAW_MODE
2016 /*
2017 * CSAM page scanning.
2018 */
2019 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2020 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
2021 {
2022 /** @todo check for 16 or 32 bits code! (D bit in the code selector) */
2023 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
2024 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2025 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2026 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
2027 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
2028 }
2029#endif
2030
2031 /*
2032 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
2033 */
2034 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2035 {
2036 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2037 UPDATE_RC();
2038 if (rc == VINF_EM_NO_MEMORY)
2039 return rc;
2040 }
2041
2042 /* check that we got them all */
2043 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2044 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
2045 }
2046
2047 /*
2048 * Normal priority then.
2049 * (Executed in no particular order.)
2050 */
2051 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
2052 {
2053 /*
2054 * PDM Queues are pending.
2055 */
2056 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
2057 PDMR3QueueFlushAll(pVM);
2058
2059 /*
2060 * PDM DMA transfers are pending.
2061 */
2062 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
2063 PDMR3DmaRun(pVM);
2064
2065 /*
2066 * EMT Rendezvous (make sure they are handled before the requests).
2067 */
2068 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2069 {
2070 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2071 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2072 UPDATE_RC();
2073 /** @todo HACK ALERT! The following test is to make sure EM+TM
2074 * thinks the VM is stopped/reset before the next VM state change
2075 * is made. We need a better solution for this, or at least make it
2076 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2077 * VINF_EM_SUSPEND). */
2078 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2079 {
2080 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2081 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2082 return rc;
2083 }
2084 }
2085
2086 /*
2087 * Requests from other threads.
2088 */
2089 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
2090 {
2091 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2092 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
2093 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
2094 {
2095 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2096 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2097 return rc2;
2098 }
2099 UPDATE_RC();
2100 /** @todo HACK ALERT! The following test is to make sure EM+TM
2101 * thinks the VM is stopped/reset before the next VM state change
2102 * is made. We need a better solution for this, or at least make it
2103 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2104 * VINF_EM_SUSPEND). */
2105 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2106 {
2107 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2108 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2109 return rc;
2110 }
2111 }
2112
2113#ifdef VBOX_WITH_REM
2114 /* Replay the handler notification changes. */
2115 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
2116 {
2117 /* Try not to cause deadlocks. */
2118 if ( pVM->cCpus == 1
2119 || ( !PGMIsLockOwner(pVM)
2120 && !IOMIsLockWriteOwner(pVM))
2121 )
2122 {
2123 EMRemLock(pVM);
2124 REMR3ReplayHandlerNotifications(pVM);
2125 EMRemUnlock(pVM);
2126 }
2127 }
2128#endif
2129
2130 /* check that we got them all */
2131 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
2132 }
2133
2134 /*
2135 * Normal priority then. (per-VCPU)
2136 * (Executed in no particular order.)
2137 */
2138 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2139 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
2140 {
2141 /*
2142 * Requests from other threads.
2143 */
2144 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
2145 {
2146 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2147 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
2148 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
2149 {
2150 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2151 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2152 return rc2;
2153 }
2154 UPDATE_RC();
2155 /** @todo HACK ALERT! The following test is to make sure EM+TM
2156 * thinks the VM is stopped/reset before the next VM state change
2157 * is made. We need a better solution for this, or at least make it
2158 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2159 * VINF_EM_SUSPEND). */
2160 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2161 {
2162 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2163 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2164 return rc;
2165 }
2166 }
2167
2168 /* check that we got them all */
2169 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
2170 }
2171
2172 /*
2173 * High priority pre execution chunk last.
2174 * (Executed in ascending priority order.)
2175 */
2176 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
2177 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
2178 {
2179 /*
2180 * Timers before interrupts.
2181 */
2182 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
2183 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2184 TMR3TimerQueuesDo(pVM);
2185
2186 /*
2187 * Pick up asynchronously posted interrupts into the APIC.
2188 */
2189 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2190 APICUpdatePendingInterrupts(pVCpu);
2191
2192 /*
2193 * The instruction following an emulated STI should *always* be executed!
2194 *
2195 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
2196 * the eip is the same as the inhibited instr address. Before we
2197 * are able to execute this instruction in raw mode (iret to
2198 * guest code) an external interrupt might force a world switch
2199 * again. Possibly allowing a guest interrupt to be dispatched
2200 * in the process. This could break the guest. Sounds very
2201 * unlikely, but such timing-sensitive problems are not as rare as
2202 * you might think.
2203 */
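/*
 * By way of example (guest code invented for illustration):
 *      sti
 *      iret        ; EMGetInhibitInterruptsPC() -> address of this IRET
 * While RIP still equals that address the force flag stays set and no
 * interrupt is dispatched; once RIP has moved past it, the flag is
 * cleared here.
 */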
2204 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2205 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2206 {
2207 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
2208 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
2209 {
2210 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
2211 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2212 }
2213 else
2214 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
2215 }
2216
2217 /*
2218 * Interrupts.
2219 */
2220 bool fWakeupPending = false;
2221 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2222 && (!rc || rc >= VINF_EM_RESCHEDULE_HM))
2223 {
2224 if ( !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2225 && !TRPMHasTrap(pVCpu)) /* an interrupt could already be scheduled for dispatching in the recompiler. */
2226 {
2227 Assert(!HMR3IsEventPending(pVCpu));
2228 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2229#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2230 if (CPUMIsGuestInNestedHwVirtMode(pCtx))
2231 {
2232 bool fResched, fInject;
2233 rc2 = emR3NstGstInjectIntr(pVCpu, pCtx, &fResched, &fInject);
2234 if (fInject)
2235 {
2236 fWakeupPending = true;
2237# ifdef VBOX_STRICT
2238 rcIrq = rc2;
2239# endif
2240 }
2241 if (fResched)
2242 UPDATE_RC();
2243 }
2244 else
2245#endif
2246 {
2247 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
2248 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2249#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2250 && pCtx->hwvirt.fGif
2251#endif
2252#ifdef VBOX_WITH_RAW_MODE
2253 && !PATMIsPatchGCAddr(pVM, pCtx->eip)
2254#endif
2255 && pCtx->eflags.Bits.u1IF)
2256 {
2257 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2258 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
2259 /** @todo this really isn't nice, should properly handle this */
2260 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2261 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
2262 Log(("EM: TRPMR3InjectEvent -> %d\n", rc2));
2263 if (pVM->em.s.fIemExecutesAll && ( rc2 == VINF_EM_RESCHEDULE_REM
2264 || rc2 == VINF_EM_RESCHEDULE_HM
2265 || rc2 == VINF_EM_RESCHEDULE_RAW))
2266 {
2267 rc2 = VINF_EM_RESCHEDULE;
2268 }
2269#ifdef VBOX_STRICT
2270 rcIrq = rc2;
2271#endif
2272 UPDATE_RC();
2273 /* Reschedule required: We must not miss the wakeup below! */
2274 fWakeupPending = true;
2275 }
2276 }
2277 }
2278 }
2279
2280 /*
2281 * Allocate handy pages.
2282 */
2283 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2284 {
2285 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2286 UPDATE_RC();
2287 }
2288
2289 /*
2290 * Debugger Facility request.
2291 */
2292 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2293 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2294 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
2295 {
2296 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2297 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2298 UPDATE_RC();
2299 }
2300
2301 /*
2302 * EMT Rendezvous (must be serviced before termination).
2303 */
2304 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2305 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2306 {
2307 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2308 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2309 UPDATE_RC();
2310 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2311 * stopped/reset before the next VM state change is made. We need a better
2312 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2313 * && rc <= VINF_EM_SUSPEND). */
2314 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2315 {
2316 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2317 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2318 return rc;
2319 }
2320 }
2321
2322 /*
2323 * State change request (cleared by vmR3SetStateLocked).
2324 */
2325 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2326 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2327 {
2328 VMSTATE enmState = VMR3GetState(pVM);
2329 switch (enmState)
2330 {
2331 case VMSTATE_FATAL_ERROR:
2332 case VMSTATE_FATAL_ERROR_LS:
2333 case VMSTATE_GURU_MEDITATION:
2334 case VMSTATE_GURU_MEDITATION_LS:
2335 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2336 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2337 return VINF_EM_SUSPEND;
2338
2339 case VMSTATE_DESTROYING:
2340 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2341 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2342 return VINF_EM_TERMINATE;
2343
2344 default:
2345 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2346 }
2347 }
2348
2349 /*
2350 * Out of memory? Since most of our fellow high priority actions may cause us
2351 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2352 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2353 * than us since we can terminate without allocating more memory.
2354 */
2355 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2356 {
2357 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2358 UPDATE_RC();
2359 if (rc == VINF_EM_NO_MEMORY)
2360 return rc;
2361 }
2362
2363 /*
2364 * If the virtual sync clock is still stopped, make TM restart it.
2365 */
2366 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2367 TMR3VirtualSyncFF(pVM, pVCpu);
2368
2369#ifdef DEBUG
2370 /*
2371 * Debug, pause the VM.
2372 */
2373 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2374 {
2375 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2376 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2377 return VINF_EM_SUSPEND;
2378 }
2379#endif
2380
2381 /* check that we got them all */
2382 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2383 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2384 }
2385
2386#undef UPDATE_RC
2387 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2388 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2389 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2390 return rc;
2391}
2392
2393
2394/**
2395 * Check if the preset execution time cap restricts guest execution scheduling.
2396 *
2397 * @returns true if allowed, false otherwise
2398 * @param pVM The cross context VM structure.
2399 * @param pVCpu The cross context virtual CPU structure.
2400 */
2401bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2402{
2403 uint64_t u64UserTime, u64KernelTime;
2404
2405 if ( pVM->uCpuExecutionCap != 100
2406 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2407 {
2408 uint64_t u64TimeNow = RTTimeMilliTS();
2409 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2410 {
2411 /* New time slice. */
2412 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2413 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2414 pVCpu->em.s.u64TimeSliceExec = 0;
2415 }
2416 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2417
2418 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2419 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2420 return false;
2421 }
2422 return true;
2423}
2424
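/*
 * Worked example for the arithmetic above, assuming EM_TIME_SLICE is
 * 100 ms (see EMInternal.h for the actual value) and uCpuExecutionCap=50:
 * once this EMT has burnt 50 ms of kernel+user time within the current
 * slice, u64TimeSliceExec >= (100 * 50) / 100 holds and we return false
 * until the next slice starts.
 */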
2425
2426/**
2427 * Execute VM.
2428 *
2429 * This function is the main loop of the VM. The emulation thread
2430 * calls this function when the VM has been successfully constructed
2431 * and we're ready to execute the VM.
2432 *
2433 * Returning from this function means that the VM is turned off or
2434 * suspended (state already saved) and deconstruction is next in line.
2435 *
2436 * All interaction from other threads is done using forced actions
2437 * and signaling of the wait object.
2438 *
2439 * @returns VBox status code; informational status codes may indicate failure.
2440 * @param pVM The cross context VM structure.
2441 * @param pVCpu The cross context virtual CPU structure.
2442 */
2443VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2444{
2445 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2446 pVM,
2447 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2448 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2449 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2450 pVCpu->em.s.fForceRAW));
2451 VM_ASSERT_EMT(pVM);
2452 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2453 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2454 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2455 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2456
2457 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2458 if (rc == 0)
2459 {
2460 /*
2461 * Start the virtual time.
2462 */
2463 TMR3NotifyResume(pVM, pVCpu);
2464
2465 /*
2466 * The Outer Main Loop.
2467 */
2468 bool fFFDone = false;
2469
2470 /* Reschedule right away to start in the right state. */
2471 rc = VINF_SUCCESS;
2472
2473 /* If resuming after a pause or a state load, restore the previous
2474 state or else we'll start executing code. Else, just reschedule. */
2475 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2476 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2477 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2478 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2479 else
2480 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2481 pVCpu->em.s.cIemThenRemInstructions = 0;
2482 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2483
2484 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2485 for (;;)
2486 {
2487 /*
2488 * Before we can schedule anything (we're here because
2489 * scheduling is required) we must service any pending
2490 * forced actions to avoid any pending action causing
2491 * immediate rescheduling upon entering an inner loop.
2492 *
2493 * Do forced actions.
2494 */
2495 if ( !fFFDone
2496 && RT_SUCCESS(rc)
2497 && rc != VINF_EM_TERMINATE
2498 && rc != VINF_EM_OFF
2499 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2500 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2501 {
2502 rc = emR3ForcedActions(pVM, pVCpu, rc);
2503 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2504 if ( ( rc == VINF_EM_RESCHEDULE_REM
2505 || rc == VINF_EM_RESCHEDULE_HM)
2506 && pVCpu->em.s.fForceRAW)
2507 rc = VINF_EM_RESCHEDULE_RAW;
2508 }
2509 else if (fFFDone)
2510 fFFDone = false;
2511
2512 /*
2513 * Now what to do?
2514 */
2515 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2516 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2517 switch (rc)
2518 {
2519 /*
2520 * Keep doing what we're currently doing.
2521 */
2522 case VINF_SUCCESS:
2523 break;
2524
2525 /*
2526 * Reschedule - to raw-mode execution.
2527 */
2528/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2529 case VINF_EM_RESCHEDULE_RAW:
2530 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2531 if (VM_IS_RAW_MODE_ENABLED(pVM))
2532 {
2533 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2534 pVCpu->em.s.enmState = EMSTATE_RAW;
2535 }
2536 else
2537 {
2538 AssertLogRelFailed();
2539 pVCpu->em.s.enmState = EMSTATE_NONE;
2540 }
2541 break;
2542
2543 /*
2544 * Reschedule - to HM or NEM.
2545 */
2546 case VINF_EM_RESCHEDULE_HM:
2547 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2548 Assert(!pVCpu->em.s.fForceRAW);
2549 if (VM_IS_HM_ENABLED(pVM))
2550 {
2551 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2552 pVCpu->em.s.enmState = EMSTATE_HM;
2553 }
2554 else if (VM_IS_NEM_ENABLED(pVM))
2555 {
2556 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2557 pVCpu->em.s.enmState = EMSTATE_NEM;
2558 }
2559 else
2560 {
2561 AssertLogRelFailed();
2562 pVCpu->em.s.enmState = EMSTATE_NONE;
2563 }
2564 break;
2565
2566 /*
2567 * Reschedule - to recompiled execution.
2568 */
2569 case VINF_EM_RESCHEDULE_REM:
2570 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2571 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2572 {
2573 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2574 enmOldState, EMSTATE_IEM_THEN_REM));
2575 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2576 {
2577 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2578 pVCpu->em.s.cIemThenRemInstructions = 0;
2579 }
2580 }
2581 else
2582 {
2583 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2584 pVCpu->em.s.enmState = EMSTATE_REM;
2585 }
2586 break;
2587
2588 /*
2589 * Resume.
2590 */
2591 case VINF_EM_RESUME:
2592 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2593 /* Don't reschedule in the halted or wait for SIPI case. */
2594 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2595 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2596 {
2597 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2598 break;
2599 }
2600 /* fall through and get scheduled. */
2601 RT_FALL_THRU();
2602
2603 /*
2604 * Reschedule.
2605 */
2606 case VINF_EM_RESCHEDULE:
2607 {
2608 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2609 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2610 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2611 pVCpu->em.s.cIemThenRemInstructions = 0;
2612 pVCpu->em.s.enmState = enmState;
2613 break;
2614 }
2615
2616 /*
2617 * Halted.
2618 */
2619 case VINF_EM_HALT:
2620 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2621 pVCpu->em.s.enmState = EMSTATE_HALTED;
2622 break;
2623
2624 /*
2625 * Switch to the wait for SIPI state (application processor only)
2626 */
2627 case VINF_EM_WAIT_SIPI:
2628 Assert(pVCpu->idCpu != 0);
2629 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2630 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2631 break;
2632
2633
2634 /*
2635 * Suspend.
2636 */
2637 case VINF_EM_SUSPEND:
2638 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2639 Assert(enmOldState != EMSTATE_SUSPENDED);
2640 pVCpu->em.s.enmPrevState = enmOldState;
2641 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2642 break;
2643
2644 /*
2645 * Reset.
2646 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2647 */
2648 case VINF_EM_RESET:
2649 {
2650 if (pVCpu->idCpu == 0)
2651 {
2652 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2653 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2654 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2655 pVCpu->em.s.cIemThenRemInstructions = 0;
2656 pVCpu->em.s.enmState = enmState;
2657 }
2658 else
2659 {
2660 /* All other VCPUs go into the wait for SIPI state. */
2661 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2662 }
2663 break;
2664 }
2665
2666 /*
2667 * Power Off.
2668 */
2669 case VINF_EM_OFF:
2670 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2671 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2672 TMR3NotifySuspend(pVM, pVCpu);
2673 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2674 return rc;
2675
2676 /*
2677 * Terminate the VM.
2678 */
2679 case VINF_EM_TERMINATE:
2680 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2681 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2682 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2683 TMR3NotifySuspend(pVM, pVCpu);
2684 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2685 return rc;
2686
2687
2688 /*
2689 * Out of memory, suspend the VM and stuff.
2690 */
2691 case VINF_EM_NO_MEMORY:
2692 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2693 Assert(enmOldState != EMSTATE_SUSPENDED);
2694 pVCpu->em.s.enmPrevState = enmOldState;
2695 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2696 TMR3NotifySuspend(pVM, pVCpu);
2697 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2698
2699 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2700 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2701 if (rc != VINF_EM_SUSPEND)
2702 {
2703 if (RT_SUCCESS_NP(rc))
2704 {
2705 AssertLogRelMsgFailed(("%Rrc\n", rc));
2706 rc = VERR_EM_INTERNAL_ERROR;
2707 }
2708 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2709 }
2710 return rc;
2711
2712 /*
2713 * Guest debug events.
2714 */
2715 case VINF_EM_DBG_STEPPED:
2716 case VINF_EM_DBG_STOP:
2717 case VINF_EM_DBG_EVENT:
2718 case VINF_EM_DBG_BREAKPOINT:
2719 case VINF_EM_DBG_STEP:
2720 if (enmOldState == EMSTATE_RAW)
2721 {
2722 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2723 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2724 }
2725 else if (enmOldState == EMSTATE_HM)
2726 {
2727 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2728 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2729 }
2730 else if (enmOldState == EMSTATE_NEM)
2731 {
2732 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2733 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2734 }
2735 else if (enmOldState == EMSTATE_REM)
2736 {
2737 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2738 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2739 }
2740 else
2741 {
2742 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2743 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2744 }
2745 break;

                /*
                 * Hypervisor debug events.
                 */
                case VINF_EM_DBG_HYPER_STEPPED:
                case VINF_EM_DBG_HYPER_BREAKPOINT:
                case VINF_EM_DBG_HYPER_ASSERTION:
                    Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
                    pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
                    break;

                /*
                 * Triple fault.
                 */
                case VINF_EM_TRIPLE_FAULT:
                    if (!pVM->em.s.fGuruOnTripleFault)
                    {
                        Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
                        rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
                        Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
                        continue;
                    }
                    /* Else fall through and trigger a guru. */
                    RT_FALL_THRU();

                case VERR_VMM_RING0_ASSERTION:
                    Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
                    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
                    break;
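                /* Note: fGuruOnTripleFault is the default behaviour; resetting on triple
                   fault instead is handy when debugging guest boot code.  This is believed
                   to be controlled by the TripleFaultReset CFGM knob, e.g. via extra data:

                        VBoxManage setextradata <vmname> VBoxInternal/TripleFaultReset 1
                 */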

                /*
                 * Any error code showing up here other than the ones we
                 * know and process above is considered to be FATAL.
                 *
                 * Unknown warnings and informational status codes are also
                 * included in this.
                 */
                default:
                    if (RT_SUCCESS_NP(rc))
                    {
                        AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
                        rc = VERR_EM_INTERNAL_ERROR;
                    }
                    Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
                    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
                    break;
            }

            /*
             * Act on state transition.
             */
            EMSTATE const enmNewState = pVCpu->em.s.enmState;
            if (enmOldState != enmNewState)
            {
                VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);

                /* Clear MWait flags and the unhalt FF. */
                if (   enmOldState == EMSTATE_HALTED
                    && (   (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
                        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
                    && (   enmNewState == EMSTATE_RAW
                        || enmNewState == EMSTATE_HM
                        || enmNewState == EMSTATE_NEM
                        || enmNewState == EMSTATE_REM
                        || enmNewState == EMSTATE_IEM_THEN_REM
                        || enmNewState == EMSTATE_DEBUG_GUEST_RAW
                        || enmNewState == EMSTATE_DEBUG_GUEST_HM
                        || enmNewState == EMSTATE_DEBUG_GUEST_NEM
                        || enmNewState == EMSTATE_DEBUG_GUEST_IEM
                        || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
                {
                    if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
                    {
                        LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
                        pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
                    }
                    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
                    {
                        LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
                        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
                    }
                }
            }
            else
                VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);

            STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
            STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);

            /*
             * Act on the new state.
             */
            switch (enmNewState)
            {
                /*
                 * Execute raw.
                 */
                case EMSTATE_RAW:
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
#else
                    AssertLogRelMsgFailed(("%Rrc\n", rc));
                    rc = VERR_EM_INTERNAL_ERROR;
#endif
                    break;

                /*
                 * Execute hardware accelerated raw.
                 */
                case EMSTATE_HM:
                    rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
                    break;

                /*
                 * Execute using the native execution manager (NEM).
                 */
                case EMSTATE_NEM:
                    rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
                    break;

                /*
                 * Execute recompiled.
                 */
                case EMSTATE_REM:
                    rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
                    Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
                    break;

                /*
                 * Execute in the interpreter.
                 */
                case EMSTATE_IEM:
                {
#if 0 /* For testing purposes. */
                    STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
                    rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
                    STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
                    if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
                        rc = VINF_SUCCESS;
                    else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
#endif
                    rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
                    if (pVM->em.s.fIemExecutesAll)
                    {
                        Assert(rc != VINF_EM_RESCHEDULE_REM);
                        Assert(rc != VINF_EM_RESCHEDULE_RAW);
                        Assert(rc != VINF_EM_RESCHEDULE_HM);
                    }
                    fFFDone = false;
                    break;
                }
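                /* When fIemExecutesAll is set, IEM never asks to be rescheduled to
                   HM/RAW/REM, which is what the assertions above check.  As a hypothetical
                   alternative sketch, one could interpret a single instruction at a time
                   with IEMExecOne instead of IEMExecLots:

                        VBOXSTRICTRC rcStrict = IEMExecOne(pVCpu); // exactly one instruction
                        if (rcStrict == VINF_SUCCESS)
                        {
                            // Guest state has been updated; inspect it before the next step.
                        }
                 */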

                /*
                 * Execute in IEM, hoping we can quickly switch back to HM
                 * or RAW execution.  If our hopes fail, we go to REM.
                 */
                case EMSTATE_IEM_THEN_REM:
                {
                    STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
                    rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
                    STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
                    break;
                }
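                /* The idea: interpret instructions in IEM for a while (counted via
                   cIemThenRemInstructions, which is zeroed when the state is entered, see
                   the VINF_EM_RESET handling above) and, if HM/RAW execution still is not
                   possible afterwards, fall back to the recompiler rather than paying the
                   interpretation overhead indefinitely. */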

                /*
                 * Application processor execution halted until SIPI.
                 */
                case EMSTATE_WAIT_SIPI:
                    /* no break */
                /*
                 * hlt - execution halted until interrupt.
                 */
                case EMSTATE_HALTED:
                {
                    STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
                    /* If HM (or someone else) stores a pending interrupt in
                       TRPM, it must be dispatched ASAP without any halting.
                       Anything pending in TRPM has been accepted and the CPU
                       should already be in the right state to receive it. */
                    if (TRPMHasTrap(pVCpu))
                        rc = VINF_EM_RESCHEDULE;
                    /* MWAIT has a special extension where it's woken up when
                       an interrupt is pending even when IF=0. */
                    else if (   (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
                             == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
                    {
                        rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
                        if (rc == VINF_SUCCESS)
                        {
                            if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
                                APICUpdatePendingInterrupts(pVCpu);

                            if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
                                                         | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
                            {
                                Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
                                rc = VINF_EM_RESCHEDULE;
                            }
                        }
                    }
                    else
                    {
                        rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
                        /* We're only interested in NMIs/SMIs here, which have their own FFs, so we don't need to
                           check VMCPU_FF_UPDATE_APIC here. */
                        if (   rc == VINF_SUCCESS
                            && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
                        {
                            Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
                            rc = VINF_EM_RESCHEDULE;
                        }
                    }

                    STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
                    break;
                }
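                /* For reference, the MWAIT extension tested above is armed by the guest
                   setting ECX bit 0 when executing MWAIT, which requests a wakeup on a
                   pending interrupt even while RFLAGS.IF is clear.  A guest-side sketch
                   (GCC-style inline assembly, illustrative names only):

                        // Arm the monitor: address in RAX, extensions ECX=0, hints EDX=0.
                        __asm__ __volatile__("monitor" : : "a"(&g_fWakeFlag), "c"(0), "d"(0));
                        // Wait with ECX bit 0 set: break even on masked interrupts.
                        __asm__ __volatile__("mwait" : : "a"(0), "c"(1));
                 */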

                /*
                 * Suspended - return to VM.cpp.
                 */
                case EMSTATE_SUSPENDED:
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return VINF_EM_SUSPEND;

                /*
                 * Debugging in the guest.
                 */
                case EMSTATE_DEBUG_GUEST_RAW:
                case EMSTATE_DEBUG_GUEST_HM:
                case EMSTATE_DEBUG_GUEST_NEM:
                case EMSTATE_DEBUG_GUEST_IEM:
                case EMSTATE_DEBUG_GUEST_REM:
                    TMR3NotifySuspend(pVM, pVCpu);
                    rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
                    TMR3NotifyResume(pVM, pVCpu);
                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
                    break;

                /*
                 * Debugging in the hypervisor.
                 */
                case EMSTATE_DEBUG_HYPER:
                {
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);

                    rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
                    if (rc != VINF_SUCCESS)
                    {
                        if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
                            pVCpu->em.s.enmState = EMSTATE_TERMINATING;
                        else
                        {
                            /* switch to guru meditation mode */
                            pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
                            VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
                            VMMR3FatalDump(pVM, pVCpu, rc);
                        }
                        Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                        return rc;
                    }

                    STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
                    TMR3NotifyResume(pVM, pVCpu);
                    break;
                }

                /*
                 * Guru meditation takes place in the debugger.
                 */
                case EMSTATE_GURU_MEDITATION:
                {
                    TMR3NotifySuspend(pVM, pVCpu);
                    VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
                    VMMR3FatalDump(pVM, pVCpu, rc);
                    emR3Debug(pVM, pVCpu, rc);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return rc;
                }

                /*
                 * The states we don't expect here.
                 */
                case EMSTATE_NONE:
                case EMSTATE_TERMINATING:
                default:
                    AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
                    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return VERR_EM_INTERNAL_ERROR;
            }
        } /* The Outer Main Loop */
    }
    else
    {
        /*
         * Fatal error.
         */
        Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
        TMR3NotifySuspend(pVM, pVCpu);
        VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
        VMMR3FatalDump(pVM, pVCpu, rc);
        emR3Debug(pVM, pVCpu, rc);
        STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
        /** @todo change the VM state! */
        return rc;
    }

    /* not reached */
}

/**
 * Notify EM of a suspend state change (used by FTM).
 *
 * @returns VBox status code (VINF_SUCCESS).
 * @param   pVM         The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);

    TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
    pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
    pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
    return VINF_SUCCESS;
}

/**
 * Notify EM of a resume state change (used by FTM).
 *
 * @returns VBox status code (VINF_SUCCESS).
 * @param   pVM         The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    EMSTATE enmCurState = pVCpu->em.s.enmState;

    TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
    pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
    pVCpu->em.s.enmPrevState = enmCurState;
    return VINF_SUCCESS;
}
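
/* Typical (hypothetical) FTM usage pairs the two notifications around a sync
   point so that the pre-suspend execution state is restored afterwards:

        EMR3NotifySuspend(pVM);   // stop virtual time, park EM in EMSTATE_SUSPENDED
        // ... synchronise / copy the VM state ...
        EMR3NotifyResume(pVM);    // restore the previous EM state and virtual time
 */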