VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@ 72580

Last change on this file was revision 72580, checked in by vboxsync, 7 years ago

EM,NEM: Added /EM/ExitOptimizationEnabled config option to disable exit optimizations if necessary. Handle return-to-ring-3 cases in the most basic way too. bugref:9044

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 141.8 KB
Line 
1/* $Id: EM.cpp 72580 2018-06-16 15:57:07Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/vmm.h>
42#include <VBox/vmm/patm.h>
43#include <VBox/vmm/csam.h>
44#include <VBox/vmm/selm.h>
45#include <VBox/vmm/trpm.h>
46#include <VBox/vmm/iem.h>
47#include <VBox/vmm/nem.h>
48#include <VBox/vmm/iom.h>
49#include <VBox/vmm/dbgf.h>
50#include <VBox/vmm/pgm.h>
51#ifdef VBOX_WITH_REM
52# include <VBox/vmm/rem.h>
53#endif
54#include <VBox/vmm/apic.h>
55#include <VBox/vmm/tm.h>
56#include <VBox/vmm/mm.h>
57#include <VBox/vmm/ssm.h>
58#include <VBox/vmm/pdmapi.h>
59#include <VBox/vmm/pdmcritsect.h>
60#include <VBox/vmm/pdmqueue.h>
61#include <VBox/vmm/hm.h>
62#include <VBox/vmm/patm.h>
63#include "EMInternal.h"
64#include <VBox/vmm/vm.h>
65#include <VBox/vmm/uvm.h>
66#include <VBox/vmm/cpumdis.h>
67#include <VBox/dis.h>
68#include <VBox/disopcode.h>
69#include "VMMTracing.h"
70
71#include <iprt/asm.h>
72#include <iprt/string.h>
73#include <iprt/stream.h>
74#include <iprt/thread.h>
75
76
77/*********************************************************************************************************************************
78* Defined Constants And Macros *
79*********************************************************************************************************************************/
80#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
81#define EM_NOTIFY_HM
82#endif
83
84
85/*********************************************************************************************************************************
86* Internal Functions *
87*********************************************************************************************************************************/
88static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
89static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
90#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
91static const char *emR3GetStateName(EMSTATE enmState);
92#endif
93static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
94#if defined(VBOX_WITH_REM) || defined(DEBUG)
95static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
96#endif
97static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
98
99
100/**
101 * Initializes the EM.
102 *
103 * @returns VBox status code.
104 * @param pVM The cross context VM structure.
105 */
106VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
107{
108 LogFlow(("EMR3Init\n"));
109 /*
110 * Assert alignment and sizes.
111 */
112 AssertCompileMemberAlignment(VM, em.s, 32);
113 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
114 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
115
116 /*
117 * Init the structure.
118 */
119 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
120 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
121 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
122
123 bool fEnabled;
124 int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
125 AssertLogRelRCReturn(rc, rc);
126 pVM->fRecompileUser = !fEnabled;
127
128 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
129 AssertLogRelRCReturn(rc, rc);
130 pVM->fRecompileSupervisor = !fEnabled;
131
132#ifdef VBOX_WITH_RAW_RING1
133 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
134 AssertLogRelRCReturn(rc, rc);
135#else
136 pVM->fRawRing1Enabled = false; /* Disabled by default. */
137#endif
138
139 rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
140 AssertLogRelRCReturn(rc, rc);
141
142 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
143 AssertLogRelRCReturn(rc, rc);
144 pVM->em.s.fGuruOnTripleFault = !fEnabled;
145 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
146 {
147 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
148 pVM->em.s.fGuruOnTripleFault = true;
149 }
150
151 /**
152 * @cfgm{/EM/ExitOptimizationEnabled, bool, true}
153 * Whether to try correlate exit history, detect hot spots and try optimize
154 * these using IEM if there are other exits close by.
155 */
156 bool fExitOptimizationEnabled = true;
157 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, true);
158 AssertLogRelRCReturn(rc, rc);
159 for (VMCPUID i = 0; i < pVM->cCpus; i++)
160 pVM->aCpus[i].em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
161
162 LogRel(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool fExitOptimizationEnabled=%RTbool\n",
163 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault, fExitOptimizationEnabled));
164
165#ifdef VBOX_WITH_REM
166 /*
167 * Initialize the REM critical section.
168 */
169 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
170 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
171 AssertRCReturn(rc, rc);
172#endif
173
174 /*
175 * Saved state.
176 */
177 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
178 NULL, NULL, NULL,
179 NULL, emR3Save, NULL,
180 NULL, emR3Load, NULL);
181 if (RT_FAILURE(rc))
182 return rc;
183
184 for (VMCPUID i = 0; i < pVM->cCpus; i++)
185 {
186 PVMCPU pVCpu = &pVM->aCpus[i];
187
188 pVCpu->em.s.enmState = i == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
189 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
190 pVCpu->em.s.fForceRAW = false;
191 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
192 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
193
194 pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
195#ifdef VBOX_WITH_RAW_MODE
196 if (VM_IS_RAW_MODE_ENABLED(pVM))
197 {
198 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
199 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
200 }
201#endif
202
203# define EM_REG_COUNTER(a, b, c) \
204 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
205 AssertRC(rc);
206
207# define EM_REG_COUNTER_USED(a, b, c) \
208 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
209 AssertRC(rc);
210
211# define EM_REG_PROFILE(a, b, c) \
212 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
213 AssertRC(rc);
214
215# define EM_REG_PROFILE_ADV(a, b, c) \
216 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
217 AssertRC(rc);
218
219 /*
220 * Statistics.
221 */
222#ifdef VBOX_WITH_STATISTICS
223 PEMSTATS pStats;
224 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
225 if (RT_FAILURE(rc))
226 return rc;
227
228 pVCpu->em.s.pStatsR3 = pStats;
229 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
230 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
231
232 EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
233 EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");
234
235 EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
236 EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");
237
238 EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
239 EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
240 EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
241 EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
242 EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
243 EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
244 EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
245 EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
246 EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
247 EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
248 EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
249 EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
250 EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
251 EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
252 EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
253 EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
254 EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
255 EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
256 EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
257 EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
258 EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
259 EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
260 EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
261 EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
262 EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
263 EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
264 EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
265 EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
266 EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
267 EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
268 EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
269 EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
270 EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
271 EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
272 EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
273 EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
274 EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
275 EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
276 EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
277 EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
278 EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
279 EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
280 EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
281 EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
282 EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
283 EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
284 EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
285 EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
286 EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
287 EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
288 EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
289 EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
290 EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
291 EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
292 EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
293 EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
294 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
295 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
296 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
297 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
298 EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
299 EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
300 EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
301 EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
302 EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
303 EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
304 EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
305 EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
306 EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
307 EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
308 EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
309 EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
310 EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
311 EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
312
313 EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
314 EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");
315
316 EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
317 EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
318 EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
319 EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
320 EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
321 EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
322 EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
323 EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
324 EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
325 EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
326 EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
327 EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
328 EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
329 EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
330 EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
331 EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
332 EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
333 EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
334 EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
335 EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
336 EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
337 EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
338 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
339 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
340 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
341 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
342 EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
343 EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
344 EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
345 EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
346 EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
347 EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
348 EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
349 EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
350 EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
351 EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
352 EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
353 EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
354 EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
355 EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
356 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
357 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
358 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
359 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
360 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
361 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
362 EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
363 EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
364 EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
365 EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
366 EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
367 EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
368
369 EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
370 EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
371 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
372 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
373 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
374 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
375 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
376 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
377 EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
378 EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
379 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
380 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
381 EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
382 EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
383 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
384 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
385 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
386 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
387 EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
388 EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
389 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
390 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
391 EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
392 EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
393 EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
394 EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
395 EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
396 EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
397
398 EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
399 EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
400 EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of prefix .");
401 EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of prefix .");
402
403 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
404 EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions end to IEM in ring-3.");
405 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
406 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sli instructions.");
407 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
408 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
409 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
410 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
411 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
412 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
413 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
414 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
415 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
416 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
417 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
418 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
419 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
420 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
421 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
422 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
423 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
424 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
425 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
426 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
427 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
428 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
429
430 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
431 pVCpu->em.s.pCliStatTree = 0;
432
433 /* these should be considered for release statistics. */
434 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
435 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
436 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%d/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
437 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%d/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
438 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%d/EM/HMExecuteCalled", "Number of times enmR3HMExecute is called.");
439 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
440 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
441 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%d/EM/NEMEnter", "Profiling NEM entry overhead.");
442#endif /* VBOX_WITH_STATISTICS */
443 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%d/EM/NEMExec", "Profiling NEM execution.");
444 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%d/EM/NEMExecuteCalled", "Number of times enmR3NEMExecute is called.");
445#ifdef VBOX_WITH_STATISTICS
446 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
447 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
448 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
449 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
450 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
451 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
452#endif /* VBOX_WITH_STATISTICS */
453
454 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
455 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
456 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
457 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
458 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
459
460 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
461
462 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
463 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", i);
464 AssertRC(rc);
465
466 /* History record statistics */
467 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
468 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", i);
469 AssertRC(rc);
470
471 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
472 {
473 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
474 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", i, iStep);
475 AssertRC(rc);
476 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
477 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", i, iStep);
478 AssertRC(rc);
479 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
480 "Number of replacments at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", i, iStep);
481 AssertRC(rc);
482 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
483 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", i, iStep);
484 AssertRC(rc);
485 }
486 }
487
488 emR3InitDbg(pVM);
489 return VINF_SUCCESS;
490}
491
492
493/**
494 * Applies relocations to data and code managed by this
495 * component. This function will be called at init and
496 * whenever the VMM need to relocate it self inside the GC.
497 *
498 * @param pVM The cross context VM structure.
499 */
500VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
501{
502 LogFlow(("EMR3Relocate\n"));
503 for (VMCPUID i = 0; i < pVM->cCpus; i++)
504 {
505 PVMCPU pVCpu = &pVM->aCpus[i];
506 if (pVCpu->em.s.pStatsR3)
507 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
508 }
509}
510
511
512/**
513 * Reset the EM state for a CPU.
514 *
515 * Called by EMR3Reset and hot plugging.
516 *
517 * @param pVCpu The cross context virtual CPU structure.
518 */
519VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
520{
521 /* Reset scheduling state. */
522 pVCpu->em.s.fForceRAW = false;
523 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
524
525 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
526 out of the HALTED state here so that enmPrevState doesn't end up as
527 HALTED when EMR3Execute returns. */
528 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
529 {
530 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
531 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
532 }
533}
534
535
536/**
537 * Reset notification.
538 *
539 * @param pVM The cross context VM structure.
540 */
541VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
542{
543 Log(("EMR3Reset: \n"));
544 for (VMCPUID i = 0; i < pVM->cCpus; i++)
545 EMR3ResetCpu(&pVM->aCpus[i]);
546}
547
548
549/**
550 * Terminates the EM.
551 *
552 * Termination means cleaning up and freeing all resources,
553 * the VM it self is at this point powered off or suspended.
554 *
555 * @returns VBox status code.
556 * @param pVM The cross context VM structure.
557 */
558VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
559{
560 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
561
562#ifdef VBOX_WITH_REM
563 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
564#else
565 RT_NOREF(pVM);
566#endif
567 return VINF_SUCCESS;
568}
569
570
571/**
572 * Execute state save operation.
573 *
574 * @returns VBox status code.
575 * @param pVM The cross context VM structure.
576 * @param pSSM SSM operation handle.
577 */
578static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
579{
580 for (VMCPUID i = 0; i < pVM->cCpus; i++)
581 {
582 PVMCPU pVCpu = &pVM->aCpus[i];
583
584 SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
585
586 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
587 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
588 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
589
590 /* Save mwait state. */
591 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
592 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
593 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
594 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
595 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
596 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
597 AssertRCReturn(rc, rc);
598 }
599 return VINF_SUCCESS;
600}
601
602
603/**
604 * Execute state load operation.
605 *
606 * @returns VBox status code.
607 * @param pVM The cross context VM structure.
608 * @param pSSM SSM operation handle.
609 * @param uVersion Data layout version.
610 * @param uPass The data pass.
611 */
612static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
613{
614 /*
615 * Validate version.
616 */
617 if ( uVersion > EM_SAVED_STATE_VERSION
618 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
619 {
620 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
621 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
622 }
623 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
624
625 /*
626 * Load the saved state.
627 */
628 for (VMCPUID i = 0; i < pVM->cCpus; i++)
629 {
630 PVMCPU pVCpu = &pVM->aCpus[i];
631
632 int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
633 if (RT_FAILURE(rc))
634 pVCpu->em.s.fForceRAW = false;
635 AssertRCReturn(rc, rc);
636
637 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
638 {
639 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
640 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
641 AssertRCReturn(rc, rc);
642 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
643
644 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
645 }
646 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
647 {
648 /* Load mwait state. */
649 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
650 AssertRCReturn(rc, rc);
651 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
652 AssertRCReturn(rc, rc);
653 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
654 AssertRCReturn(rc, rc);
655 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
656 AssertRCReturn(rc, rc);
657 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
658 AssertRCReturn(rc, rc);
659 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
660 AssertRCReturn(rc, rc);
661 }
662
663 Assert(!pVCpu->em.s.pCliStatTree);
664 }
665 return VINF_SUCCESS;
666}
667
668
669/**
670 * Argument packet for emR3SetExecutionPolicy.
671 */
672struct EMR3SETEXECPOLICYARGS
673{
674 EMEXECPOLICY enmPolicy;
675 bool fEnforce;
676};
677
678
679/**
680 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
681 */
682static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
683{
684 /*
685 * Only the first CPU changes the variables.
686 */
687 if (pVCpu->idCpu == 0)
688 {
689 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
690 switch (pArgs->enmPolicy)
691 {
692 case EMEXECPOLICY_RECOMPILE_RING0:
693 pVM->fRecompileSupervisor = pArgs->fEnforce;
694 break;
695 case EMEXECPOLICY_RECOMPILE_RING3:
696 pVM->fRecompileUser = pArgs->fEnforce;
697 break;
698 case EMEXECPOLICY_IEM_ALL:
699 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
700 break;
701 default:
702 AssertFailedReturn(VERR_INVALID_PARAMETER);
703 }
704 LogRel(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
705 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
706 }
707
708 /*
709 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
710 */
711 return pVCpu->em.s.enmState == EMSTATE_RAW
712 || pVCpu->em.s.enmState == EMSTATE_HM
713 || pVCpu->em.s.enmState == EMSTATE_NEM
714 || pVCpu->em.s.enmState == EMSTATE_IEM
715 || pVCpu->em.s.enmState == EMSTATE_REM
716 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
717 ? VINF_EM_RESCHEDULE
718 : VINF_SUCCESS;
719}
720
721
722/**
723 * Changes an execution scheduling policy parameter.
724 *
725 * This is used to enable or disable raw-mode / hardware-virtualization
726 * execution of user and supervisor code.
727 *
728 * @returns VINF_SUCCESS on success.
729 * @returns VINF_RESCHEDULE if a rescheduling might be required.
730 * @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
731 *
732 * @param pUVM The user mode VM handle.
733 * @param enmPolicy The scheduling policy to change.
734 * @param fEnforce Whether to enforce the policy or not.
735 */
736VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
737{
738 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
739 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
740 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
741
742 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
743 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
744}
745
746
747/**
748 * Queries an execution scheduling policy parameter.
749 *
750 * @returns VBox status code
751 * @param pUVM The user mode VM handle.
752 * @param enmPolicy The scheduling policy to query.
753 * @param pfEnforced Where to return the current value.
754 */
755VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
756{
757 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
758 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
759 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
760 PVM pVM = pUVM->pVM;
761 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
762
763 /* No need to bother EMTs with a query. */
764 switch (enmPolicy)
765 {
766 case EMEXECPOLICY_RECOMPILE_RING0:
767 *pfEnforced = pVM->fRecompileSupervisor;
768 break;
769 case EMEXECPOLICY_RECOMPILE_RING3:
770 *pfEnforced = pVM->fRecompileUser;
771 break;
772 case EMEXECPOLICY_IEM_ALL:
773 *pfEnforced = pVM->em.s.fIemExecutesAll;
774 break;
775 default:
776 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
777 }
778
779 return VINF_SUCCESS;
780}
781
782
783/**
784 * Queries the main execution engine of the VM.
785 *
786 * @returns VBox status code
787 * @param pUVM The user mode VM handle.
788 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
789 */
790VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
791{
792 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
793 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
794
795 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
796 PVM pVM = pUVM->pVM;
797 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
798
799 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
800 return VINF_SUCCESS;
801}
802
803
804/**
805 * Raise a fatal error.
806 *
807 * Safely terminate the VM with full state report and stuff. This function
808 * will naturally never return.
809 *
810 * @param pVCpu The cross context virtual CPU structure.
811 * @param rc VBox status code.
812 */
813VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
814{
815 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
816 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
817}
818
819
820#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
821/**
822 * Gets the EM state name.
823 *
824 * @returns pointer to read only state name,
825 * @param enmState The state.
826 */
827static const char *emR3GetStateName(EMSTATE enmState)
828{
829 switch (enmState)
830 {
831 case EMSTATE_NONE: return "EMSTATE_NONE";
832 case EMSTATE_RAW: return "EMSTATE_RAW";
833 case EMSTATE_HM: return "EMSTATE_HM";
834 case EMSTATE_IEM: return "EMSTATE_IEM";
835 case EMSTATE_REM: return "EMSTATE_REM";
836 case EMSTATE_HALTED: return "EMSTATE_HALTED";
837 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
838 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
839 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
840 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
841 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
842 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
843 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
844 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
845 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
846 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
847 case EMSTATE_NEM: return "EMSTATE_NEM";
848 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
849 default: return "Unknown!";
850 }
851}
852#endif /* LOG_ENABLED || VBOX_STRICT */
853
854
855/**
856 * Handle pending ring-3 I/O port write.
857 *
858 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
859 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
860 *
861 * @returns Strict VBox status code.
862 * @param pVM The cross context VM structure.
863 * @param pVCpu The cross context virtual CPU structure.
864 */
865VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
866{
867 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
868
869 /* Get and clear the pending data. */
870 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
871 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
872 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
873 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
874 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
875
876 /* Assert sanity. */
877 switch (cbValue)
878 {
879 case 1: Assert(!(cbValue & UINT32_C(0xffffff00))); break;
880 case 2: Assert(!(cbValue & UINT32_C(0xffff0000))); break;
881 case 4: break;
882 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
883 }
884 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
885
886 /* Do the work.*/
887 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
888 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
889 if (IOM_SUCCESS(rcStrict))
890 {
891 pVCpu->cpum.GstCtx.rip += cbInstr;
892 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
893 }
894 return rcStrict;
895}
896
897
898/**
899 * Handle pending ring-3 I/O port write.
900 *
901 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
902 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
903 *
904 * @returns Strict VBox status code.
905 * @param pVM The cross context VM structure.
906 * @param pVCpu The cross context virtual CPU structure.
907 */
908VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
909{
910 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
911
912 /* Get and clear the pending data. */
913 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
914 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
915 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
916 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
917
918 /* Assert sanity. */
919 switch (cbValue)
920 {
921 case 1: break;
922 case 2: break;
923 case 4: break;
924 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
925 }
926 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
927 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
928
929 /* Do the work.*/
930 uint32_t uValue = 0;
931 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
932 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
933 if (IOM_SUCCESS(rcStrict))
934 {
935 if (cbValue == 4)
936 pVCpu->cpum.GstCtx.rax = uValue;
937 else if (cbValue == 2)
938 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
939 else
940 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
941 pVCpu->cpum.GstCtx.rip += cbInstr;
942 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
943 }
944 return rcStrict;
945}
946
947
948/**
949 * Debug loop.
950 *
951 * @returns VBox status code for EM.
952 * @param pVM The cross context VM structure.
953 * @param pVCpu The cross context virtual CPU structure.
954 * @param rc Current EM VBox status code.
955 */
956static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
957{
958 for (;;)
959 {
960 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
961 const VBOXSTRICTRC rcLast = rc;
962
963 /*
964 * Debug related RC.
965 */
966 switch (VBOXSTRICTRC_VAL(rc))
967 {
968 /*
969 * Single step an instruction.
970 */
971 case VINF_EM_DBG_STEP:
972 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
973 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
974 || pVCpu->em.s.fForceRAW /* paranoia */)
975#ifdef VBOX_WITH_RAW_MODE
976 rc = emR3RawStep(pVM, pVCpu);
977#else
978 AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
979#endif
980 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
981 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
982 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
983 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
984#ifdef VBOX_WITH_REM
985 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
986 rc = emR3RemStep(pVM, pVCpu);
987#endif
988 else
989 {
990 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
991 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
992 rc = VINF_EM_DBG_STEPPED;
993 }
994 break;
995
996 /*
997 * Simple events: stepped, breakpoint, stop/assertion.
998 */
999 case VINF_EM_DBG_STEPPED:
1000 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
1001 break;
1002
1003 case VINF_EM_DBG_BREAKPOINT:
1004 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
1005 break;
1006
1007 case VINF_EM_DBG_STOP:
1008 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
1009 break;
1010
1011 case VINF_EM_DBG_EVENT:
1012 rc = DBGFR3EventHandlePending(pVM, pVCpu);
1013 break;
1014
1015 case VINF_EM_DBG_HYPER_STEPPED:
1016 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
1017 break;
1018
1019 case VINF_EM_DBG_HYPER_BREAKPOINT:
1020 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
1021 break;
1022
1023 case VINF_EM_DBG_HYPER_ASSERTION:
1024 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
1025 RTLogFlush(NULL);
1026 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
1027 break;
1028
1029 /*
1030 * Guru meditation.
1031 */
1032 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
1033 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
1034 break;
1035 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
1036 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
1037 break;
1038 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
1039 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
1040 break;
1041
1042 default: /** @todo don't use default for guru, but make special errors code! */
1043 {
1044 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
1045 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
1046 break;
1047 }
1048 }
1049
1050 /*
1051 * Process the result.
1052 */
1053 switch (VBOXSTRICTRC_VAL(rc))
1054 {
1055 /*
1056 * Continue the debugging loop.
1057 */
1058 case VINF_EM_DBG_STEP:
1059 case VINF_EM_DBG_STOP:
1060 case VINF_EM_DBG_EVENT:
1061 case VINF_EM_DBG_STEPPED:
1062 case VINF_EM_DBG_BREAKPOINT:
1063 case VINF_EM_DBG_HYPER_STEPPED:
1064 case VINF_EM_DBG_HYPER_BREAKPOINT:
1065 case VINF_EM_DBG_HYPER_ASSERTION:
1066 break;
1067
1068 /*
1069 * Resuming execution (in some form) has to be done here if we got
1070 * a hypervisor debug event.
1071 */
1072 case VINF_SUCCESS:
1073 case VINF_EM_RESUME:
1074 case VINF_EM_SUSPEND:
1075 case VINF_EM_RESCHEDULE:
1076 case VINF_EM_RESCHEDULE_RAW:
1077 case VINF_EM_RESCHEDULE_REM:
1078 case VINF_EM_HALT:
1079 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
1080 {
1081#ifdef VBOX_WITH_RAW_MODE
1082 rc = emR3RawResumeHyper(pVM, pVCpu);
1083 if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
1084 continue;
1085#else
1086 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
1087#endif
1088 }
1089 if (rc == VINF_SUCCESS)
1090 rc = VINF_EM_RESCHEDULE;
1091 return rc;
1092
1093 /*
1094 * The debugger isn't attached.
1095 * We'll simply turn the thing off since that's the easiest thing to do.
1096 */
1097 case VERR_DBGF_NOT_ATTACHED:
1098 switch (VBOXSTRICTRC_VAL(rcLast))
1099 {
1100 case VINF_EM_DBG_HYPER_STEPPED:
1101 case VINF_EM_DBG_HYPER_BREAKPOINT:
1102 case VINF_EM_DBG_HYPER_ASSERTION:
1103 case VERR_TRPM_PANIC:
1104 case VERR_TRPM_DONT_PANIC:
1105 case VERR_VMM_RING0_ASSERTION:
1106 case VERR_VMM_HYPER_CR3_MISMATCH:
1107 case VERR_VMM_RING3_CALL_DISABLED:
1108 return rcLast;
1109 }
1110 return VINF_EM_OFF;
1111
1112 /*
1113 * Status codes terminating the VM in one or another sense.
1114 */
1115 case VINF_EM_TERMINATE:
1116 case VINF_EM_OFF:
1117 case VINF_EM_RESET:
1118 case VINF_EM_NO_MEMORY:
1119 case VINF_EM_RAW_STALE_SELECTOR:
1120 case VINF_EM_RAW_IRET_TRAP:
1121 case VERR_TRPM_PANIC:
1122 case VERR_TRPM_DONT_PANIC:
1123 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
1124 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
1125 case VERR_VMM_RING0_ASSERTION:
1126 case VERR_VMM_HYPER_CR3_MISMATCH:
1127 case VERR_VMM_RING3_CALL_DISABLED:
1128 case VERR_INTERNAL_ERROR:
1129 case VERR_INTERNAL_ERROR_2:
1130 case VERR_INTERNAL_ERROR_3:
1131 case VERR_INTERNAL_ERROR_4:
1132 case VERR_INTERNAL_ERROR_5:
1133 case VERR_IPE_UNEXPECTED_STATUS:
1134 case VERR_IPE_UNEXPECTED_INFO_STATUS:
1135 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
1136 return rc;
1137
1138 /*
1139 * The rest is unexpected, and will keep us here.
1140 */
1141 default:
1142 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1143 break;
1144 }
1145 } /* debug for ever */
1146}
1147
1148
1149#if defined(VBOX_WITH_REM) || defined(DEBUG)
1150/**
1151 * Steps recompiled code.
1152 *
1153 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1154 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1155 *
1156 * @param pVM The cross context VM structure.
1157 * @param pVCpu The cross context virtual CPU structure.
1158 */
1159static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1160{
1161 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1162
1163# ifdef VBOX_WITH_REM
1164 EMRemLock(pVM);
1165
1166 /*
1167 * Switch to REM, step instruction, switch back.
1168 */
1169 int rc = REMR3State(pVM, pVCpu);
1170 if (RT_SUCCESS(rc))
1171 {
1172 rc = REMR3Step(pVM, pVCpu);
1173 REMR3StateBack(pVM, pVCpu);
1174 }
1175 EMRemUnlock(pVM);
1176
1177# else
1178 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1179# endif
1180
1181 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1182 return rc;
1183}
1184#endif /* VBOX_WITH_REM || DEBUG */
1185
1186
1187#ifdef VBOX_WITH_REM
1188/**
1189 * emR3RemExecute helper that syncs the state back from REM and leave the REM
1190 * critical section.
1191 *
1192 * @returns false - new fInREMState value.
1193 * @param pVM The cross context VM structure.
1194 * @param pVCpu The cross context virtual CPU structure.
1195 */
1196DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1197{
1198 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1199 REMR3StateBack(pVM, pVCpu);
1200 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1201
1202 EMRemUnlock(pVM);
1203 return false;
1204}
1205#endif
1206
1207
1208/**
1209 * Executes recompiled code.
1210 *
1211 * This function contains the recompiler version of the inner
1212 * execution loop (the outer loop being in EMR3ExecuteVM()).
1213 *
1214 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1215 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1216 *
1217 * @param pVM The cross context VM structure.
1218 * @param pVCpu The cross context virtual CPU structure.
1219 * @param pfFFDone Where to store an indicator telling whether or not
1220 * FFs were done before returning.
1221 *
1222 */
1223static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1224{
1225#ifdef LOG_ENABLED
1226 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1227 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1228
1229 if (pCtx->eflags.Bits.u1VM)
1230 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
1231 else
1232 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
1233#endif
1234 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1235
1236#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1237 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1238 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1239 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1240#endif
1241
1242 /*
1243 * Spin till we get a forced action which returns anything but VINF_SUCCESS
1244 * or the REM suggests raw-mode execution.
1245 */
1246 *pfFFDone = false;
1247#ifdef VBOX_WITH_REM
1248 bool fInREMState = false;
1249#else
1250 uint32_t cLoops = 0;
1251#endif
1252 int rc = VINF_SUCCESS;
1253 for (;;)
1254 {
1255#ifdef VBOX_WITH_REM
1256 /*
1257 * Lock REM and update the state if not already in sync.
1258 *
1259 * Note! Big lock, but you are not supposed to own any lock when
1260 * coming in here.
1261 */
1262 if (!fInREMState)
1263 {
1264 EMRemLock(pVM);
1265 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1266
1267 /* Flush the recompiler translation blocks if the VCPU has changed,
1268 also force a full CPU state resync. */
1269 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1270 {
1271 REMFlushTBs(pVM);
1272 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1273 }
1274 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1275
1276 rc = REMR3State(pVM, pVCpu);
1277
1278 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1279 if (RT_FAILURE(rc))
1280 break;
1281 fInREMState = true;
1282
1283 /*
1284 * We might have missed the raising of VMREQ, TIMER and some other
1285 * important FFs while we were busy switching the state. So, check again.
1286 */
1287 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1288 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1289 {
1290 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1291 goto l_REMDoForcedActions;
1292 }
1293 }
1294#endif
1295
1296 /*
1297 * Execute REM.
1298 */
1299 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1300 {
1301 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1302#ifdef VBOX_WITH_REM
1303 rc = REMR3Run(pVM, pVCpu);
1304#else
1305 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
1306#endif
1307 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1308 }
1309 else
1310 {
1311 /* Give up this time slice; virtual time continues */
1312 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1313 RTThreadSleep(5);
1314 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1315 rc = VINF_SUCCESS;
1316 }
1317
1318 /*
1319 * Deal with high priority post execution FFs before doing anything
1320 * else. Sync back the state and leave the lock to be on the safe side.
1321 */
1322 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1323 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1324 {
1325#ifdef VBOX_WITH_REM
1326 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1327#endif
1328 rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
1329 }
1330
1331 /*
1332 * Process the returned status code.
1333 */
1334 if (rc != VINF_SUCCESS)
1335 {
1336 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1337 break;
1338 if (rc != VINF_REM_INTERRUPED_FF)
1339 {
1340#ifndef VBOX_WITH_REM
1341 /* Try dodge unimplemented IEM trouble by reschduling. */
1342 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1343 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1344 {
1345 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1346 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1347 {
1348 rc = VINF_EM_RESCHEDULE;
1349 break;
1350 }
1351 }
1352#endif
1353
1354 /*
1355 * Anything which is not known to us means an internal error
1356 * and the termination of the VM!
1357 */
1358 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1359 break;
1360 }
1361 }
1362
1363
1364 /*
1365 * Check and execute forced actions.
1366 *
1367 * Sync back the VM state and leave the lock before calling any of
1368 * these, you never know what's going to happen here.
1369 */
1370#ifdef VBOX_HIGH_RES_TIMERS_HACK
1371 TMTimerPollVoid(pVM, pVCpu);
1372#endif
1373 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1374 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1375 || VMCPU_FF_IS_PENDING(pVCpu,
1376 VMCPU_FF_ALL_REM_MASK
1377 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1378 {
1379#ifdef VBOX_WITH_REM
1380l_REMDoForcedActions:
1381 if (fInREMState)
1382 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1383#endif
1384 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1385 rc = emR3ForcedActions(pVM, pVCpu, rc);
1386 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1387 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1388 if ( rc != VINF_SUCCESS
1389 && rc != VINF_EM_RESCHEDULE_REM)
1390 {
1391 *pfFFDone = true;
1392 break;
1393 }
1394 }
1395
1396#ifndef VBOX_WITH_REM
1397 /*
1398 * Have to check if we can get back to fast execution mode every so often.
1399 */
1400 if (!(++cLoops & 7))
1401 {
1402 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1403 if ( enmCheck != EMSTATE_REM
1404 && enmCheck != EMSTATE_IEM_THEN_REM)
1405 return VINF_EM_RESCHEDULE;
1406 }
1407#endif
1408
1409 } /* The Inner Loop, recompiled execution mode version. */
1410
1411
1412#ifdef VBOX_WITH_REM
1413 /*
1414 * Returning. Sync back the VM state if required.
1415 */
1416 if (fInREMState)
1417 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1418#endif
1419
1420 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1421 return rc;
1422}
1423
1424
1425#ifdef DEBUG
1426
1427int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1428{
1429 EMSTATE enmOldState = pVCpu->em.s.enmState;
1430
1431 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1432
1433 Log(("Single step BEGIN:\n"));
1434 for (uint32_t i = 0; i < cIterations; i++)
1435 {
1436 DBGFR3PrgStep(pVCpu);
1437 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1438 emR3RemStep(pVM, pVCpu);
1439 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1440 break;
1441 }
1442 Log(("Single step END:\n"));
1443 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1444 pVCpu->em.s.enmState = enmOldState;
1445 return VINF_EM_RESCHEDULE;
1446}
1447
1448#endif /* DEBUG */
1449
1450
1451/**
1452 * Try execute the problematic code in IEM first, then fall back on REM if there
1453 * is too much of it or if IEM doesn't implement something.
1454 *
1455 * @returns Strict VBox status code from IEMExecLots.
1456 * @param pVM The cross context VM structure.
1457 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1458 * @param pfFFDone Force flags done indicator.
1459 *
1460 * @thread EMT(pVCpu)
1461 */
1462static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1463{
1464 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1465 *pfFFDone = false;
1466
1467 /*
1468 * Execute in IEM for a while.
1469 */
1470 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1471 {
1472 uint32_t cInstructions;
1473 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
1474 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1475 if (rcStrict != VINF_SUCCESS)
1476 {
1477 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1478 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1479 break;
1480
1481 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1482 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1483 return rcStrict;
1484 }
1485
1486 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1487 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1488 {
1489 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1490 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1491 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1492 pVCpu->em.s.enmState = enmNewState;
1493 return VINF_SUCCESS;
1494 }
1495
1496 /*
1497 * Check for pending actions.
1498 */
1499 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1500 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1501 return VINF_SUCCESS;
1502 }
1503
1504 /*
1505 * Switch to REM.
1506 */
1507 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1508 pVCpu->em.s.enmState = EMSTATE_REM;
1509 return VINF_SUCCESS;
1510}
1511
1512
/**
 * Decides whether to execute RAW, HWACC or REM.
 *
 * The checks form a filter chain: each test that fails for raw-mode falls back
 * to EMSTATE_REM; only if all pass is EMSTATE_RAW returned.
 *
 * @returns new EM state
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest CPU context.
 */
EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /*
     * When forcing raw-mode execution, things are simple.
     */
    if (pVCpu->em.s.fForceRAW)
        return EMSTATE_RAW;

    /*
     * We stay in the wait for SIPI state unless explicitly told otherwise.
     */
    if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
        return EMSTATE_WAIT_SIPI;

    /*
     * Execute everything in IEM?
     */
    if (pVM->em.s.fIemExecutesAll)
        return EMSTATE_IEM;

    /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
    /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
    /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */

    /* Snapshot eflags once; the raw-mode tests below all work on this copy. */
    X86EFLAGS EFlags = pCtx->eflags;
    if (!VM_IS_RAW_MODE_ENABLED(pVM))
    {
        /* Prefer HM, then NEM; otherwise interpret/recompile. */
        if (EMIsHwVirtExecutionEnabled(pVM))
        {
            if (VM_IS_HM_ENABLED(pVM))
            {
                if (HMR3CanExecuteGuest(pVM, pCtx))
                    return EMSTATE_HM;
            }
            else if (NEMR3CanExecuteGuest(pVM, pVCpu, pCtx))
                return EMSTATE_NEM;

            /*
             * Note! Raw mode and hw accelerated mode are incompatible. The latter
             *       turns off monitoring features essential for raw mode!
             */
            return EMSTATE_IEM_THEN_REM;
        }
    }

    /*
     * Standard raw-mode:
     *
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
    {
        Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
        return EMSTATE_REM;
    }

# ifndef VBOX_RAW_V86
    if (EFlags.u32 & X86_EFL_VM) {
        Log2(("raw mode refused: VM_MASK\n"));
        return EMSTATE_REM;
    }
# endif

    /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
    uint32_t u32CR0 = pCtx->cr0;
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return EMSTATE_REM;
    }

    if (pCtx->cr4 & X86_CR4_PAE)
    {
        uint32_t u32Dummy, u32Features;

        /* Guest claims PAE but the (virtual) CPU doesn't report it: leave it to REM. */
        CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
        if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
            return EMSTATE_REM;
    }

    unsigned uSS = pCtx->ss.Sel;
    if (    pCtx->eflags.Bits.u1VM
        ||  (uSS & X86_SEL_RPL) == 3)
    {
        /* Ring-3 (or V86) guest code path. */
        if (!EMIsRawRing3Enabled(pVM))
            return EMSTATE_REM;

        if (!(EFlags.u32 & X86_EFL_IF))
        {
            Log2(("raw mode refused: IF (RawR3)\n"));
            return EMSTATE_REM;
        }

        if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
        {
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return EMSTATE_REM;
        }
    }
    else
    {
        /* Supervisor (ring-0/1) guest code path. */
        if (!EMIsRawRing0Enabled(pVM))
            return EMSTATE_REM;

        if (EMIsRawRing1Enabled(pVM))
        {
            /* Only ring 0 and 1 supervisor code. */
            if ((uSS & X86_SEL_RPL) == 2)   /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
            {
                Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
                return EMSTATE_REM;
            }
        }
        /* Only ring 0 supervisor code. */
        else if ((uSS & X86_SEL_RPL) != 0)
        {
            Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
            return EMSTATE_REM;
        }

        // Let's start with pure 32 bits ring 0 code first
        /** @todo What's pure 32-bit mode? flat? */
        if (    !(pCtx->ss.Attr.n.u1DefBig)
            ||  !(pCtx->cs.Attr.n.u1DefBig))
        {
            Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
            return EMSTATE_REM;
        }

        /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
        if (!(u32CR0 & X86_CR0_WP))
        {
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return EMSTATE_REM;
        }

# ifdef VBOX_WITH_RAW_MODE
        if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
        {
            /* Patch code overrides the remaining refusal checks. */
            Log2(("raw r0 mode forced: patch code\n"));
# ifdef VBOX_WITH_SAFE_STR
            Assert(pCtx->tr.Sel);
# endif
            return EMSTATE_RAW;
        }
# endif /* VBOX_WITH_RAW_MODE */

# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(EFlags.u32 & X86_EFL_IF))
        {
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return EMSTATE_REM;
        }
# endif

# ifndef VBOX_WITH_RAW_RING1
        /** @todo still necessary??? */
        if (EFlags.Bits.u2IOPL != 0)
        {
            Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
            return EMSTATE_REM;
        }
# endif
    }

    /*
     * Stale hidden selectors means raw-mode is unsafe (being very careful).
     */
    if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale CS\n"));
        return EMSTATE_REM;
    }
    if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale SS\n"));
        return EMSTATE_REM;
    }
    if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale DS\n"));
        return EMSTATE_REM;
    }
    if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale ES\n"));
        return EMSTATE_REM;
    }
    if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale FS\n"));
        return EMSTATE_REM;
    }
    if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale GS\n"));
        return EMSTATE_REM;
    }

# ifdef VBOX_WITH_SAFE_STR
    if (pCtx->tr.Sel == 0)
    {
        Log(("Raw mode refused -> TR=0\n"));
        return EMSTATE_REM;
    }
# endif

    /* All checks passed: raw-mode it is. */
    /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
    return EMSTATE_RAW;
}
1735
1736
/**
 * Executes all high priority post execution force actions.
 *
 * Handled here (in order): pending critsect leaves, CR3 update, PAE PDPE
 * update, pending IEM/IOM work, CSAM pending actions (raw-mode), and the
 * out-of-memory condition. The order matters; see the inline comments.
 *
 * @returns Strict VBox status code.  Typically @a rc, but may be upgraded to
 *          fatal error status code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      The current strict VBox status code rc.
 */
VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
{
    VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));

    /* Leave critical sections that could not be left in ring-0/raw-mode context. */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
        PDMCritSectBothFF(pVCpu);

    /* Update CR3 (Nested Paging case for HM). */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
    {
        CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
        int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
        if (RT_FAILURE(rc2))
            return rc2;
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
    }

    /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
    {
        CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
        if (CPUMIsGuestInPAEMode(pVCpu))
        {
            PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
            AssertPtr(pPdpes);

            PGMGstUpdatePaePdpes(pVCpu, pPdpes);
            Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
        }
        else
            /* Not in PAE mode: the flag is moot, just clear it. */
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
    }

    /* IEM has pending work (typically memory write after INS instruction). */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
        rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);

    /* IOM has pending work (committing an I/O or MMIO write). */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
    {
        rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
        if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
        { /* half likely, or at least it's a line shorter. */ }
        else if (rc == VINF_SUCCESS)
            /* Resume the exit-history optimized execution of the interrupted exit record. */
            rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
        else
            pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
    }

#ifdef VBOX_WITH_RAW_MODE
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
        CSAMR3DoPendingAction(pVM, pVCpu);
#endif

    /* Out of memory overrides any less severe EM status code. */
    if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
    {
        if (    rc > VINF_EM_NO_MEMORY
            &&  rc <= VINF_EM_LAST)
            rc = VINF_EM_NO_MEMORY;
    }

    return rc;
}
1810
1811#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/**
 * Helper for emR3ForcedActions() for injecting interrupts into the
 * nested-guest.
 *
 * Currently only the SVM (AMD-V) nested case is implemented; the VMX nested
 * case is a stub and falls through to an internal error.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        Pointer to the nested-guest CPU context.
 * @param   pfResched   Where to store whether a reschedule is required.
 * @param   pfInject    Where to store whether an interrupt was injected (and if
 *                      a wake up is pending).
 */
static int emR3NstGstInjectIntr(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfResched, bool *pfInject)
{
    *pfResched = false;
    *pfInject  = false;
    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
    {
        PVM pVM  = pVCpu->CTX_SUFF(pVM);
        Assert(pCtx->hwvirt.fGif);
        bool fVirtualGif = CPUMGetSvmNstGstVGif(pCtx);
#ifdef VBOX_WITH_RAW_MODE
        /* Never inject while executing patch code. */
        fVirtualGif &= !PATMIsPatchGCAddr(pVM, pCtx->eip);
#endif
        if (fVirtualGif)
        {
            /*
             * Physical interrupts first: either deliver an INTR #VMEXIT to the
             * nested hypervisor (if intercepted) or inject into the nested-guest.
             */
            if (CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
            {
                Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
                if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                {
                    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
                    {
                        VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
                        if (RT_SUCCESS(rcStrict))
                        {
                            /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
                             *        doesn't intercept HLT but intercepts INTR? */
                            *pfResched = true;
                            Assert(rcStrict != VINF_PGM_CHANGE_MODE);
                            if (rcStrict == VINF_SVM_VMEXIT)
                                return VINF_SUCCESS;
                            return VBOXSTRICTRC_VAL(rcStrict);
                        }

                        AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                        return VINF_EM_TRIPLE_FAULT;
                    }

                    /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
                    /** @todo this really isn't nice, should properly handle this */
                    int rc = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
                    Assert(rc != VINF_PGM_CHANGE_MODE);
                    if (rc == VINF_SVM_VMEXIT)
                        rc = VINF_SUCCESS;
                    /* When everything runs in IEM, collapse the mode-specific reschedules. */
                    if (pVM->em.s.fIemExecutesAll && (   rc == VINF_EM_RESCHEDULE_REM
                                                      || rc == VINF_EM_RESCHEDULE_HM
                                                      || rc == VINF_EM_RESCHEDULE_RAW))
                    {
                        rc = VINF_EM_RESCHEDULE;
                    }

                    *pfResched = true;
                    *pfInject  = true;
                    return rc;
                }
            }

            /*
             * Virtual interrupts (V_IRQ) next: #VMEXIT on VINTR intercept, else
             * assert the virtual interrupt vector via TRPM.
             */
            if (    VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
                &&  CPUMCanSvmNstGstTakeVirtIntr(pVCpu, pCtx))
            {
                if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
                {
                    VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
                    if (RT_SUCCESS(rcStrict))
                    {
                        /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
                         *        doesn't intercept HLT but intercepts VINTR? */
                        *pfResched = true;
                        Assert(rcStrict != VINF_PGM_CHANGE_MODE);
                        if (rcStrict == VINF_SVM_VMEXIT)
                            return VINF_SUCCESS;
                        return VBOXSTRICTRC_VAL(rcStrict);
                    }

                    AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                    return VINF_EM_TRIPLE_FAULT;
                }

                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
                uint8_t const uNstGstVector = CPUMGetSvmNstGstInterrupt(pCtx);
                AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR vector %#x\n", uNstGstVector));
                TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
                Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));

                *pfResched = true;
                *pfInject  = true;
                return VINF_EM_RESCHEDULE;
            }
        }
        /* GIF clear: nothing can be injected right now. */
        return VINF_SUCCESS;
    }

    if (CPUMIsGuestInVmxNestedHwVirtMode(pCtx))
    { /** @todo Nested VMX. */ }

    /* Shouldn't really get here. */
    AssertMsgFailed(("Unrecognized nested hwvirt. arch!\n"));
    return VERR_EM_INTERNAL_ERROR;
}
1921#endif
1922
1923/**
1924 * Executes all pending forced actions.
1925 *
1926 * Forced actions can cause execution delays and execution
1927 * rescheduling. The first we deal with using action priority, so
1928 * that for instance pending timers aren't scheduled and ran until
1929 * right before execution. The rescheduling we deal with using
1930 * return codes. The same goes for VM termination, only in that case
1931 * we exit everything.
1932 *
1933 * @returns VBox status code of equal or greater importance/severity than rc.
1934 * The most important ones are: VINF_EM_RESCHEDULE,
1935 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1936 *
1937 * @param pVM The cross context VM structure.
1938 * @param pVCpu The cross context virtual CPU structure.
1939 * @param rc The current rc.
1940 *
1941 */
1942int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1943{
1944 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1945#ifdef VBOX_STRICT
1946 int rcIrq = VINF_SUCCESS;
1947#endif
1948 int rc2;
1949#define UPDATE_RC() \
1950 do { \
1951 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1952 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1953 break; \
1954 if (!rc || rc2 < rc) \
1955 rc = rc2; \
1956 } while (0)
1957 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1958
1959 /*
1960 * Post execution chunk first.
1961 */
1962 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1963 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1964 {
1965 /*
1966 * EMT Rendezvous (must be serviced before termination).
1967 */
1968 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1969 {
1970 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1971 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1972 UPDATE_RC();
1973 /** @todo HACK ALERT! The following test is to make sure EM+TM
1974 * thinks the VM is stopped/reset before the next VM state change
1975 * is made. We need a better solution for this, or at least make it
1976 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1977 * VINF_EM_SUSPEND). */
1978 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1979 {
1980 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1981 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1982 return rc;
1983 }
1984 }
1985
1986 /*
1987 * State change request (cleared by vmR3SetStateLocked).
1988 */
1989 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1990 {
1991 VMSTATE enmState = VMR3GetState(pVM);
1992 switch (enmState)
1993 {
1994 case VMSTATE_FATAL_ERROR:
1995 case VMSTATE_FATAL_ERROR_LS:
1996 case VMSTATE_GURU_MEDITATION:
1997 case VMSTATE_GURU_MEDITATION_LS:
1998 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1999 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2000 return VINF_EM_SUSPEND;
2001
2002 case VMSTATE_DESTROYING:
2003 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2004 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2005 return VINF_EM_TERMINATE;
2006
2007 default:
2008 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2009 }
2010 }
2011
2012 /*
2013 * Debugger Facility polling.
2014 */
2015 if ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2016 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2017 {
2018 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2019 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2020 UPDATE_RC();
2021 }
2022
2023 /*
2024 * Postponed reset request.
2025 */
2026 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
2027 {
2028 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2029 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
2030 UPDATE_RC();
2031 }
2032
2033#ifdef VBOX_WITH_RAW_MODE
2034 /*
2035 * CSAM page scanning.
2036 */
2037 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2038 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
2039 {
2040 /** @todo check for 16 or 32 bits code! (D bit in the code selector) */
2041 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
2042 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2043 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2044 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
2045 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
2046 }
2047#endif
2048
2049 /*
2050 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
2051 */
2052 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2053 {
2054 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2055 UPDATE_RC();
2056 if (rc == VINF_EM_NO_MEMORY)
2057 return rc;
2058 }
2059
2060 /* check that we got them all */
2061 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2062 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
2063 }
2064
2065 /*
2066 * Normal priority then.
2067 * (Executed in no particular order.)
2068 */
2069 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
2070 {
2071 /*
2072 * PDM Queues are pending.
2073 */
2074 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
2075 PDMR3QueueFlushAll(pVM);
2076
2077 /*
2078 * PDM DMA transfers are pending.
2079 */
2080 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
2081 PDMR3DmaRun(pVM);
2082
2083 /*
2084 * EMT Rendezvous (make sure they are handled before the requests).
2085 */
2086 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2087 {
2088 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2089 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2090 UPDATE_RC();
2091 /** @todo HACK ALERT! The following test is to make sure EM+TM
2092 * thinks the VM is stopped/reset before the next VM state change
2093 * is made. We need a better solution for this, or at least make it
2094 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2095 * VINF_EM_SUSPEND). */
2096 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2097 {
2098 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2099 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2100 return rc;
2101 }
2102 }
2103
2104 /*
2105 * Requests from other threads.
2106 */
2107 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
2108 {
2109 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2110 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
2111 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
2112 {
2113 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2114 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2115 return rc2;
2116 }
2117 UPDATE_RC();
2118 /** @todo HACK ALERT! The following test is to make sure EM+TM
2119 * thinks the VM is stopped/reset before the next VM state change
2120 * is made. We need a better solution for this, or at least make it
2121 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2122 * VINF_EM_SUSPEND). */
2123 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2124 {
2125 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2126 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2127 return rc;
2128 }
2129 }
2130
2131#ifdef VBOX_WITH_REM
2132 /* Replay the handler notification changes. */
2133 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
2134 {
2135 /* Try not to cause deadlocks. */
2136 if ( pVM->cCpus == 1
2137 || ( !PGMIsLockOwner(pVM)
2138 && !IOMIsLockWriteOwner(pVM))
2139 )
2140 {
2141 EMRemLock(pVM);
2142 REMR3ReplayHandlerNotifications(pVM);
2143 EMRemUnlock(pVM);
2144 }
2145 }
2146#endif
2147
2148 /* check that we got them all */
2149 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
2150 }
2151
2152 /*
2153 * Normal priority then. (per-VCPU)
2154 * (Executed in no particular order.)
2155 */
2156 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2157 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
2158 {
2159 /*
2160 * Requests from other threads.
2161 */
2162 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
2163 {
2164 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2165 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
2166 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
2167 {
2168 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2169 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2170 return rc2;
2171 }
2172 UPDATE_RC();
2173 /** @todo HACK ALERT! The following test is to make sure EM+TM
2174 * thinks the VM is stopped/reset before the next VM state change
2175 * is made. We need a better solution for this, or at least make it
2176 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2177 * VINF_EM_SUSPEND). */
2178 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2179 {
2180 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2181 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2182 return rc;
2183 }
2184 }
2185
2186 /* check that we got them all */
2187 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
2188 }
2189
2190 /*
2191 * High priority pre execution chunk last.
2192 * (Executed in ascending priority order.)
2193 */
2194 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
2195 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
2196 {
2197 /*
2198 * Timers before interrupts.
2199 */
2200 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
2201 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2202 TMR3TimerQueuesDo(pVM);
2203
2204 /*
2205 * Pick up asynchronously posted interrupts into the APIC.
2206 */
2207 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2208 APICUpdatePendingInterrupts(pVCpu);
2209
2210 /*
2211 * The instruction following an emulated STI should *always* be executed!
2212 *
2213 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
2214 * the eip is the same as the inhibited instr address. Before we
2215 * are able to execute this instruction in raw mode (iret to
2216 * guest code) an external interrupt might force a world switch
2217 * again. Possibly allowing a guest interrupt to be dispatched
2218 * in the process. This could break the guest. Sounds very
2219 * unlikely, but such timing sensitive problem are not as rare as
2220 * you might think.
2221 */
2222 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2223 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2224 {
2225 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
2226 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
2227 {
2228 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
2229 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2230 }
2231 else
2232 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
2233 }
2234
2235 /*
2236 * Interrupts.
2237 */
2238 bool fWakeupPending = false;
2239 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2240 && (!rc || rc >= VINF_EM_RESCHEDULE_HM))
2241 {
2242 if ( !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2243 && !TRPMHasTrap(pVCpu)) /* an interrupt could already be scheduled for dispatching in the recompiler. */
2244 {
2245 Assert(!HMR3IsEventPending(pVCpu));
2246 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2247#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2248 if (CPUMIsGuestInNestedHwVirtMode(pCtx))
2249 {
2250 bool fResched, fInject;
2251 rc2 = emR3NstGstInjectIntr(pVCpu, pCtx, &fResched, &fInject);
2252 if (fInject)
2253 {
2254 fWakeupPending = true;
2255# ifdef VBOX_STRICT
2256 rcIrq = rc2;
2257# endif
2258 }
2259 if (fResched)
2260 UPDATE_RC();
2261 }
2262 else
2263#endif
2264 {
2265 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
2266 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2267#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2268 && pCtx->hwvirt.fGif
2269#endif
2270#ifdef VBOX_WITH_RAW_MODE
2271 && !PATMIsPatchGCAddr(pVM, pCtx->eip)
2272#endif
2273 && pCtx->eflags.Bits.u1IF)
2274 {
2275 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2276 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
2277 /** @todo this really isn't nice, should properly handle this */
2278 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2279 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
2280 Log(("EM: TRPMR3InjectEvent -> %d\n", rc2));
2281 if (pVM->em.s.fIemExecutesAll && ( rc2 == VINF_EM_RESCHEDULE_REM
2282 || rc2 == VINF_EM_RESCHEDULE_HM
2283 || rc2 == VINF_EM_RESCHEDULE_RAW))
2284 {
2285 rc2 = VINF_EM_RESCHEDULE;
2286 }
2287#ifdef VBOX_STRICT
2288 rcIrq = rc2;
2289#endif
2290 UPDATE_RC();
2291 /* Reschedule required: We must not miss the wakeup below! */
2292 fWakeupPending = true;
2293 }
2294 }
2295 }
2296 }
2297
2298 /*
2299 * Allocate handy pages.
2300 */
2301 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2302 {
2303 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2304 UPDATE_RC();
2305 }
2306
2307 /*
2308 * Debugger Facility request.
2309 */
2310 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2311 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2312 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
2313 {
2314 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2315 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2316 UPDATE_RC();
2317 }
2318
2319 /*
2320 * EMT Rendezvous (must be serviced before termination).
2321 */
2322 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2323 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2324 {
2325 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2326 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2327 UPDATE_RC();
2328 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2329 * stopped/reset before the next VM state change is made. We need a better
2330 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2331 * && rc >= VINF_EM_SUSPEND). */
2332 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2333 {
2334 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2335 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2336 return rc;
2337 }
2338 }
2339
2340 /*
2341 * State change request (cleared by vmR3SetStateLocked).
2342 */
2343 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2344 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2345 {
2346 VMSTATE enmState = VMR3GetState(pVM);
2347 switch (enmState)
2348 {
2349 case VMSTATE_FATAL_ERROR:
2350 case VMSTATE_FATAL_ERROR_LS:
2351 case VMSTATE_GURU_MEDITATION:
2352 case VMSTATE_GURU_MEDITATION_LS:
2353 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2354 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2355 return VINF_EM_SUSPEND;
2356
2357 case VMSTATE_DESTROYING:
2358 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2359 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2360 return VINF_EM_TERMINATE;
2361
2362 default:
2363 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2364 }
2365 }
2366
2367 /*
2368 * Out of memory? Since most of our fellow high priority actions may cause us
2369 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2370 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2371 * than us since we can terminate without allocating more memory.
2372 */
2373 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2374 {
2375 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2376 UPDATE_RC();
2377 if (rc == VINF_EM_NO_MEMORY)
2378 return rc;
2379 }
2380
2381 /*
2382 * If the virtual sync clock is still stopped, make TM restart it.
2383 */
2384 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2385 TMR3VirtualSyncFF(pVM, pVCpu);
2386
2387#ifdef DEBUG
2388 /*
2389 * Debug, pause the VM.
2390 */
2391 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2392 {
2393 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2394 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2395 return VINF_EM_SUSPEND;
2396 }
2397#endif
2398
2399 /* check that we got them all */
2400 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2401 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2402 }
2403
2404#undef UPDATE_RC
2405 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2406 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2407 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2408 return rc;
2409}
2410
2411
2412/**
2413 * Check if the preset execution time cap restricts guest execution scheduling.
2414 *
2415 * @returns true if allowed, false otherwise
2416 * @param pVM The cross context VM structure.
2417 * @param pVCpu The cross context virtual CPU structure.
2418 */
2419bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2420{
2421 uint64_t u64UserTime, u64KernelTime;
2422
2423 if ( pVM->uCpuExecutionCap != 100
2424 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2425 {
2426 uint64_t u64TimeNow = RTTimeMilliTS();
2427 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2428 {
2429 /* New time slice. */
2430 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2431 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2432 pVCpu->em.s.u64TimeSliceExec = 0;
2433 }
2434 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2435
2436 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2437 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2438 return false;
2439 }
2440 return true;
2441}
2442
2443
2444/**
2445 * Execute VM.
2446 *
2447 * This function is the main loop of the VM. The emulation thread
2448 * calls this function when the VM has been successfully constructed
2449 * and we're ready for executing the VM.
2450 *
2451 * Returning from this function means that the VM is turned off or
2452 * suspended (state already saved) and deconstruction is next in line.
2453 *
2454 * All interaction from other thread are done using forced actions
2455 * and signaling of the wait object.
2456 *
2457 * @returns VBox status code, informational status codes may indicate failure.
2458 * @param pVM The cross context VM structure.
2459 * @param pVCpu The cross context virtual CPU structure.
2460 */
2461VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2462{
2463 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2464 pVM,
2465 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2466 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2467 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2468 pVCpu->em.s.fForceRAW));
2469 VM_ASSERT_EMT(pVM);
2470 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2471 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2472 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2473 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2474
2475 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2476 if (rc == 0)
2477 {
2478 /*
2479 * Start the virtual time.
2480 */
2481 TMR3NotifyResume(pVM, pVCpu);
2482
2483 /*
2484 * The Outer Main Loop.
2485 */
2486 bool fFFDone = false;
2487
2488 /* Reschedule right away to start in the right state. */
2489 rc = VINF_SUCCESS;
2490
2491 /* If resuming after a pause or a state load, restore the previous
2492 state or else we'll start executing code. Else, just reschedule. */
2493 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2494 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2495 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2496 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2497 else
2498 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2499 pVCpu->em.s.cIemThenRemInstructions = 0;
2500 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2501
2502 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2503 for (;;)
2504 {
2505 /*
2506 * Before we can schedule anything (we're here because
2507 * scheduling is required) we must service any pending
2508 * forced actions to avoid any pending action causing
2509 * immediate rescheduling upon entering an inner loop
2510 *
2511 * Do forced actions.
2512 */
2513 if ( !fFFDone
2514 && RT_SUCCESS(rc)
2515 && rc != VINF_EM_TERMINATE
2516 && rc != VINF_EM_OFF
2517 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2518 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2519 {
2520 rc = emR3ForcedActions(pVM, pVCpu, rc);
2521 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2522 if ( ( rc == VINF_EM_RESCHEDULE_REM
2523 || rc == VINF_EM_RESCHEDULE_HM)
2524 && pVCpu->em.s.fForceRAW)
2525 rc = VINF_EM_RESCHEDULE_RAW;
2526 }
2527 else if (fFFDone)
2528 fFFDone = false;
2529
2530 /*
2531 * Now what to do?
2532 */
2533 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2534 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2535 switch (rc)
2536 {
2537 /*
2538 * Keep doing what we're currently doing.
2539 */
2540 case VINF_SUCCESS:
2541 break;
2542
2543 /*
2544 * Reschedule - to raw-mode execution.
2545 */
2546/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2547 case VINF_EM_RESCHEDULE_RAW:
2548 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2549 if (VM_IS_RAW_MODE_ENABLED(pVM))
2550 {
2551 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2552 pVCpu->em.s.enmState = EMSTATE_RAW;
2553 }
2554 else
2555 {
2556 AssertLogRelFailed();
2557 pVCpu->em.s.enmState = EMSTATE_NONE;
2558 }
2559 break;
2560
2561 /*
2562 * Reschedule - to HM or NEM.
2563 */
2564 case VINF_EM_RESCHEDULE_HM:
2565 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2566 Assert(!pVCpu->em.s.fForceRAW);
2567 if (VM_IS_HM_ENABLED(pVM))
2568 {
2569 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2570 pVCpu->em.s.enmState = EMSTATE_HM;
2571 }
2572 else if (VM_IS_NEM_ENABLED(pVM))
2573 {
2574 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2575 pVCpu->em.s.enmState = EMSTATE_NEM;
2576 }
2577 else
2578 {
2579 AssertLogRelFailed();
2580 pVCpu->em.s.enmState = EMSTATE_NONE;
2581 }
2582 break;
2583
2584 /*
2585 * Reschedule - to recompiled execution.
2586 */
2587 case VINF_EM_RESCHEDULE_REM:
2588 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2589 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2590 {
2591 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2592 enmOldState, EMSTATE_IEM_THEN_REM));
2593 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2594 {
2595 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2596 pVCpu->em.s.cIemThenRemInstructions = 0;
2597 }
2598 }
2599 else
2600 {
2601 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2602 pVCpu->em.s.enmState = EMSTATE_REM;
2603 }
2604 break;
2605
2606 /*
2607 * Resume.
2608 */
2609 case VINF_EM_RESUME:
2610 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2611 /* Don't reschedule in the halted or wait for SIPI case. */
2612 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2613 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2614 {
2615 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2616 break;
2617 }
2618 /* fall through and get scheduled. */
2619 RT_FALL_THRU();
2620
2621 /*
2622 * Reschedule.
2623 */
2624 case VINF_EM_RESCHEDULE:
2625 {
2626 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2627 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2628 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2629 pVCpu->em.s.cIemThenRemInstructions = 0;
2630 pVCpu->em.s.enmState = enmState;
2631 break;
2632 }
2633
2634 /*
2635 * Halted.
2636 */
2637 case VINF_EM_HALT:
2638 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2639 pVCpu->em.s.enmState = EMSTATE_HALTED;
2640 break;
2641
2642 /*
2643 * Switch to the wait for SIPI state (application processor only)
2644 */
2645 case VINF_EM_WAIT_SIPI:
2646 Assert(pVCpu->idCpu != 0);
2647 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2648 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2649 break;
2650
2651
2652 /*
2653 * Suspend.
2654 */
2655 case VINF_EM_SUSPEND:
2656 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2657 Assert(enmOldState != EMSTATE_SUSPENDED);
2658 pVCpu->em.s.enmPrevState = enmOldState;
2659 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2660 break;
2661
2662 /*
2663 * Reset.
2664 * We might end up doing a double reset for now, we'll have to clean up the mess later.
2665 */
2666 case VINF_EM_RESET:
2667 {
2668 if (pVCpu->idCpu == 0)
2669 {
2670 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2671 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2672 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2673 pVCpu->em.s.cIemThenRemInstructions = 0;
2674 pVCpu->em.s.enmState = enmState;
2675 }
2676 else
2677 {
2678 /* All other VCPUs go into the wait for SIPI state. */
2679 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2680 }
2681 break;
2682 }
2683
2684 /*
2685 * Power Off.
2686 */
2687 case VINF_EM_OFF:
2688 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2689 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2690 TMR3NotifySuspend(pVM, pVCpu);
2691 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2692 return rc;
2693
2694 /*
2695 * Terminate the VM.
2696 */
2697 case VINF_EM_TERMINATE:
2698 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2699 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2700 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2701 TMR3NotifySuspend(pVM, pVCpu);
2702 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2703 return rc;
2704
2705
2706 /*
2707 * Out of memory, suspend the VM and stuff.
2708 */
2709 case VINF_EM_NO_MEMORY:
2710 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2711 Assert(enmOldState != EMSTATE_SUSPENDED);
2712 pVCpu->em.s.enmPrevState = enmOldState;
2713 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2714 TMR3NotifySuspend(pVM, pVCpu);
2715 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2716
2717 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2718 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2719 if (rc != VINF_EM_SUSPEND)
2720 {
2721 if (RT_SUCCESS_NP(rc))
2722 {
2723 AssertLogRelMsgFailed(("%Rrc\n", rc));
2724 rc = VERR_EM_INTERNAL_ERROR;
2725 }
2726 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2727 }
2728 return rc;
2729
2730 /*
2731 * Guest debug events.
2732 */
2733 case VINF_EM_DBG_STEPPED:
2734 case VINF_EM_DBG_STOP:
2735 case VINF_EM_DBG_EVENT:
2736 case VINF_EM_DBG_BREAKPOINT:
2737 case VINF_EM_DBG_STEP:
2738 if (enmOldState == EMSTATE_RAW)
2739 {
2740 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2741 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2742 }
2743 else if (enmOldState == EMSTATE_HM)
2744 {
2745 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2746 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2747 }
2748 else if (enmOldState == EMSTATE_NEM)
2749 {
2750 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2751 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2752 }
2753 else if (enmOldState == EMSTATE_REM)
2754 {
2755 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2756 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2757 }
2758 else
2759 {
2760 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2761 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2762 }
2763 break;
2764
2765 /*
2766 * Hypervisor debug events.
2767 */
2768 case VINF_EM_DBG_HYPER_STEPPED:
2769 case VINF_EM_DBG_HYPER_BREAKPOINT:
2770 case VINF_EM_DBG_HYPER_ASSERTION:
2771 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2772 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2773 break;
2774
2775 /*
2776 * Triple fault.
2777 */
2778 case VINF_EM_TRIPLE_FAULT:
2779 if (!pVM->em.s.fGuruOnTripleFault)
2780 {
2781 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2782 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2783 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2784 continue;
2785 }
2786 /* Else fall through and trigger a guru. */
2787 RT_FALL_THRU();
2788
2789 case VERR_VMM_RING0_ASSERTION:
2790 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2791 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2792 break;
2793
2794 /*
2795 * Any error code showing up here other than the ones we
2796 * know and process above are considered to be FATAL.
2797 *
2798 * Unknown warnings and informational status codes are also
2799 * included in this.
2800 */
2801 default:
2802 if (RT_SUCCESS_NP(rc))
2803 {
2804 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2805 rc = VERR_EM_INTERNAL_ERROR;
2806 }
2807 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2808 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2809 break;
2810 }
2811
2812 /*
2813 * Act on state transition.
2814 */
2815 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2816 if (enmOldState != enmNewState)
2817 {
2818 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2819
2820 /* Clear MWait flags and the unhalt FF. */
2821 if ( enmOldState == EMSTATE_HALTED
2822 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2823 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2824 && ( enmNewState == EMSTATE_RAW
2825 || enmNewState == EMSTATE_HM
2826 || enmNewState == EMSTATE_NEM
2827 || enmNewState == EMSTATE_REM
2828 || enmNewState == EMSTATE_IEM_THEN_REM
2829 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2830 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2831 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2832 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2833 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2834 {
2835 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2836 {
2837 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2838 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2839 }
2840 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2841 {
2842 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2843 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2844 }
2845 }
2846 }
2847 else
2848 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2849
2850 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2851 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2852
2853 /*
2854 * Act on the new state.
2855 */
2856 switch (enmNewState)
2857 {
2858 /*
2859 * Execute raw.
2860 */
2861 case EMSTATE_RAW:
2862#ifdef VBOX_WITH_RAW_MODE
2863 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2864#else
2865 AssertLogRelMsgFailed(("%Rrc\n", rc));
2866 rc = VERR_EM_INTERNAL_ERROR;
2867#endif
2868 break;
2869
2870 /*
2871 * Execute hardware accelerated raw.
2872 */
2873 case EMSTATE_HM:
2874 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2875 break;
2876
2877 /*
2878 * Execute hardware accelerated raw.
2879 */
2880 case EMSTATE_NEM:
2881 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2882 break;
2883
2884 /*
2885 * Execute recompiled.
2886 */
2887 case EMSTATE_REM:
2888 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2889 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2890 break;
2891
2892 /*
2893 * Execute in the interpreter.
2894 */
2895 case EMSTATE_IEM:
2896 {
2897#if 0 /* For testing purposes. */
2898 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2899 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2900 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2901 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2902 rc = VINF_SUCCESS;
2903 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2904#endif
2905 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
2906 if (pVM->em.s.fIemExecutesAll)
2907 {
2908 Assert(rc != VINF_EM_RESCHEDULE_REM);
2909 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2910 Assert(rc != VINF_EM_RESCHEDULE_HM);
2911 }
2912 fFFDone = false;
2913 break;
2914 }
2915
2916 /*
2917 * Execute in IEM, hoping we can quickly switch aback to HM
2918 * or RAW execution. If our hopes fail, we go to REM.
2919 */
2920 case EMSTATE_IEM_THEN_REM:
2921 {
2922 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2923 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2924 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2925 break;
2926 }
2927
2928 /*
2929 * Application processor execution halted until SIPI.
2930 */
2931 case EMSTATE_WAIT_SIPI:
2932 /* no break */
2933 /*
2934 * hlt - execution halted until interrupt.
2935 */
2936 case EMSTATE_HALTED:
2937 {
2938 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2939 /* If HM (or someone else) store a pending interrupt in
2940 TRPM, it must be dispatched ASAP without any halting.
2941 Anything pending in TRPM has been accepted and the CPU
2942 should already be the right state to receive it. */
2943 if (TRPMHasTrap(pVCpu))
2944 rc = VINF_EM_RESCHEDULE;
2945 /* MWAIT has a special extension where it's woken up when
2946 an interrupt is pending even when IF=0. */
2947 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2948 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2949 {
2950 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2951 if (rc == VINF_SUCCESS)
2952 {
2953 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2954 APICUpdatePendingInterrupts(pVCpu);
2955
2956 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2957 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2958 {
2959 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2960 rc = VINF_EM_RESCHEDULE;
2961 }
2962 }
2963 }
2964 else
2965 {
2966 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2967 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2968 check VMCPU_FF_UPDATE_APIC here. */
2969 if ( rc == VINF_SUCCESS
2970 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2971 {
2972 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2973 rc = VINF_EM_RESCHEDULE;
2974 }
2975 }
2976
2977 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2978 break;
2979 }
2980
2981 /*
2982 * Suspended - return to VM.cpp.
2983 */
2984 case EMSTATE_SUSPENDED:
2985 TMR3NotifySuspend(pVM, pVCpu);
2986 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2987 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
2988 return VINF_EM_SUSPEND;
2989
2990 /*
2991 * Debugging in the guest.
2992 */
2993 case EMSTATE_DEBUG_GUEST_RAW:
2994 case EMSTATE_DEBUG_GUEST_HM:
2995 case EMSTATE_DEBUG_GUEST_NEM:
2996 case EMSTATE_DEBUG_GUEST_IEM:
2997 case EMSTATE_DEBUG_GUEST_REM:
2998 TMR3NotifySuspend(pVM, pVCpu);
2999 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
3000 TMR3NotifyResume(pVM, pVCpu);
3001 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
3002 break;
3003
3004 /*
3005 * Debugging in the hypervisor.
3006 */
3007 case EMSTATE_DEBUG_HYPER:
3008 {
3009 TMR3NotifySuspend(pVM, pVCpu);
3010 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3011
3012 rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
3013 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
3014 if (rc != VINF_SUCCESS)
3015 {
3016 if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
3017 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
3018 else
3019 {
3020 /* switch to guru meditation mode */
3021 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3022 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3023 VMMR3FatalDump(pVM, pVCpu, rc);
3024 }
3025 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3026 return rc;
3027 }
3028
3029 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
3030 TMR3NotifyResume(pVM, pVCpu);
3031 break;
3032 }
3033
3034 /*
3035 * Guru meditation takes place in the debugger.
3036 */
3037 case EMSTATE_GURU_MEDITATION:
3038 {
3039 TMR3NotifySuspend(pVM, pVCpu);
3040 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3041 VMMR3FatalDump(pVM, pVCpu, rc);
3042 emR3Debug(pVM, pVCpu, rc);
3043 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3044 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3045 return rc;
3046 }
3047
3048 /*
3049 * The states we don't expect here.
3050 */
3051 case EMSTATE_NONE:
3052 case EMSTATE_TERMINATING:
3053 default:
3054 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
3055 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
3056 TMR3NotifySuspend(pVM, pVCpu);
3057 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3058 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
3059 return VERR_EM_INTERNAL_ERROR;
3060 }
3061 } /* The Outer Main Loop */
3062 }
3063 else
3064 {
3065 /*
3066 * Fatal error.
3067 */
3068 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
3069 TMR3NotifySuspend(pVM, pVCpu);
3070 VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
3071 VMMR3FatalDump(pVM, pVCpu, rc);
3072 emR3Debug(pVM, pVCpu, rc);
3073 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
3074 /** @todo change the VM state! */
3075 return rc;
3076 }
3077
3078 /* not reached */
3079}
3080
3081/**
3082 * Notify EM of a state change (used by FTM)
3083 *
3084 * @param pVM The cross context VM structure.
3085 */
3086VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
3087{
3088 PVMCPU pVCpu = VMMGetCpu(pVM);
3089
3090 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
3091 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
3092 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
3093 return VINF_SUCCESS;
3094}
3095
3096/**
3097 * Notify EM of a state change (used by FTM)
3098 *
3099 * @param pVM The cross context VM structure.
3100 */
3101VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
3102{
3103 PVMCPU pVCpu = VMMGetCpu(pVM);
3104 EMSTATE enmCurState = pVCpu->em.s.enmState;
3105
3106 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
3107 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
3108 pVCpu->em.s.enmPrevState = enmCurState;
3109 return VINF_SUCCESS;
3110}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette