VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@ 72300

Last change on this file since 72300 was 72300, checked in by vboxsync, 7 years ago

NEM,STAM: Partition memory statistics for NEM. bugref:9044

  • STAM: Redid the way we handle statistics that require fetching data from ring-0 (or elsewhere) by introducing a refresh-group concept. We refresh the statistics for a group only when needed, and at most once per enumeration/query. There's a new registration API for these.
  • NEM: Added memory balance statistics for the partition. Some failed fumbling through VID.DLL/SYS before realizing that a hypercall is the only way to get at them.
  • NEM: Added a hypervisor input/output page buffer for non-EMT threads so we can get statistics. Put the related data and code into a separate structure to avoid duplication.
  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 133.5 KB
1/* $Id: EM.cpp 72300 2018-05-23 15:13:06Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
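/* To make the scheduling description above concrete, here is a minimal,
 * illustrative sketch of how the outer loop dispatches on the per-CPU EM
 * state. This is NOT the actual EMR3ExecuteVM() body (the real loop also
 * handles forced actions, state transitions and error funneling), but the
 * EMSTATE values and inner-loop names are the ones used in this file:
 *
 *      for (;;)
 *      {
 *          bool fFFDone = false;
 *          switch (pVCpu->em.s.enmState)
 *          {
 *              case EMSTATE_RAW: rc = emR3RawExecute(pVM, pVCpu, &fFFDone); break;
 *              case EMSTATE_HM:  rc = emR3HmExecute(pVM, pVCpu, &fFFDone);  break;
 *              case EMSTATE_REM: rc = emR3RemExecute(pVM, pVCpu, &fFFDone); break;
 *              default:          rc = emR3Debug(pVM, pVCpu, rc);            break;
 *          }
 *          // ... process rc, service forced actions, switch state ...
 *      }
 */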
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#include <VBox/vmm/em.h>
40#include <VBox/vmm/vmm.h>
41#include <VBox/vmm/patm.h>
42#include <VBox/vmm/csam.h>
43#include <VBox/vmm/selm.h>
44#include <VBox/vmm/trpm.h>
45#include <VBox/vmm/iem.h>
46#include <VBox/vmm/nem.h>
47#include <VBox/vmm/iom.h>
48#include <VBox/vmm/dbgf.h>
49#include <VBox/vmm/pgm.h>
50#ifdef VBOX_WITH_REM
51# include <VBox/vmm/rem.h>
52#endif
53#include <VBox/vmm/apic.h>
54#include <VBox/vmm/tm.h>
55#include <VBox/vmm/mm.h>
56#include <VBox/vmm/ssm.h>
57#include <VBox/vmm/pdmapi.h>
58#include <VBox/vmm/pdmcritsect.h>
59#include <VBox/vmm/pdmqueue.h>
60#include <VBox/vmm/hm.h>
61#include <VBox/vmm/patm.h>
62#include "EMInternal.h"
63#include <VBox/vmm/vm.h>
64#include <VBox/vmm/uvm.h>
65#include <VBox/vmm/cpumdis.h>
66#include <VBox/dis.h>
67#include <VBox/disopcode.h>
68#include "VMMTracing.h"
69
70#include <iprt/asm.h>
71#include <iprt/string.h>
72#include <iprt/stream.h>
73#include <iprt/thread.h>
74
75
76/*********************************************************************************************************************************
77* Defined Constants And Macros *
78*********************************************************************************************************************************/
79#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
80#define EM_NOTIFY_HM
81#endif
82
83
84/*********************************************************************************************************************************
85* Internal Functions *
86*********************************************************************************************************************************/
87static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
88static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
89#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
90static const char *emR3GetStateName(EMSTATE enmState);
91#endif
92static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
93#if defined(VBOX_WITH_REM) || defined(DEBUG)
94static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
95#endif
96static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
97int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
98
99
100/**
101 * Initializes the EM.
102 *
103 * @returns VBox status code.
104 * @param pVM The cross context VM structure.
105 */
106VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
107{
108 LogFlow(("EMR3Init\n"));
109 /*
110 * Assert alignment and sizes.
111 */
112 AssertCompileMemberAlignment(VM, em.s, 32);
113 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
114 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
115
116 /*
117 * Init the structure.
118 */
119 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
120 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
121 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
122
123 bool fEnabled;
124 int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
125 AssertLogRelRCReturn(rc, rc);
126 pVM->fRecompileUser = !fEnabled;
127
128 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
129 AssertLogRelRCReturn(rc, rc);
130 pVM->fRecompileSupervisor = !fEnabled;
131
132#ifdef VBOX_WITH_RAW_RING1
133 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
134 AssertLogRelRCReturn(rc, rc);
135#else
136 pVM->fRawRing1Enabled = false; /* Disabled by default. */
137#endif
138
139 rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
140 AssertLogRelRCReturn(rc, rc);
141
142 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
143 AssertLogRelRCReturn(rc, rc);
144 pVM->em.s.fGuruOnTripleFault = !fEnabled;
145 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
146 {
147 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
148 pVM->em.s.fGuruOnTripleFault = true;
149 }
150
151 LogRel(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool\n",
152 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault));
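/* Note: the CFGM values queried above can be set from the outside via the
   extra-data mechanism; e.g. the EM key above can be flipped with
   (illustrative, assuming a VM named "MyVM"):
       VBoxManage setextradata "MyVM" "VBoxInternal/EM/IemExecutesAll" 1
   which forces every instruction through IEM on the next VM start. */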
153
154#ifdef VBOX_WITH_REM
155 /*
156 * Initialize the REM critical section.
157 */
158 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
159 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
160 AssertRCReturn(rc, rc);
161#endif
162
163 /*
164 * Saved state.
165 */
166 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
167 NULL, NULL, NULL,
168 NULL, emR3Save, NULL,
169 NULL, emR3Load, NULL);
170 if (RT_FAILURE(rc))
171 return rc;
172
173 for (VMCPUID i = 0; i < pVM->cCpus; i++)
174 {
175 PVMCPU pVCpu = &pVM->aCpus[i];
176
177 pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
178 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
179 pVCpu->em.s.fForceRAW = false;
180
181 pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
182#ifdef VBOX_WITH_RAW_MODE
183 if (VM_IS_RAW_MODE_ENABLED(pVM))
184 {
185 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
186 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
187 }
188#endif
189
190 /* Force reset of the time slice. */
191 pVCpu->em.s.u64TimeSliceStart = 0;
192
193# define EM_REG_COUNTER(a, b, c) \
194 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
195 AssertRC(rc);
196
197# define EM_REG_COUNTER_USED(a, b, c) \
198 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
199 AssertRC(rc);
200
201# define EM_REG_PROFILE(a, b, c) \
202 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
203 AssertRC(rc);
204
205# define EM_REG_PROFILE_ADV(a, b, c) \
206 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
207 AssertRC(rc);
208
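/* All counters and profiles registered below land in the STAM tree and can
   be inspected on a running VM, either with the built-in debugger or from
   the command line, e.g. (illustrative):
       VBoxManage debugvm "MyVM" statistics --pattern "/EM/*"
 */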
209 /*
210 * Statistics.
211 */
212#ifdef VBOX_WITH_STATISTICS
213 PEMSTATS pStats;
214 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
215 if (RT_FAILURE(rc))
216 return rc;
217
218 pVCpu->em.s.pStatsR3 = pStats;
219 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
220 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
221
222 EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
223 EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");
224
225 EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
226 EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");
227
228 EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
229 EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
230 EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
231 EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
232 EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
233 EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
234 EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
235 EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
236 EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
237 EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
238 EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
239 EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
240 EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
241 EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
242 EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
243 EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
244 EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
245 EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
246 EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
247 EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
248 EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
249 EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
250 EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
251 EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
252 EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
253 EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
254 EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
255 EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
256 EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
257 EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
258 EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
259 EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
260 EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
261 EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
262 EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
263 EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
264 EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
265 EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
266 EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
267 EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
268 EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
269 EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
270 EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
271 EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
272 EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
273 EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
274 EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
275 EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
276 EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
277 EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
278 EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
279 EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
280 EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
281 EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
282 EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
283 EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
284 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
285 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
286 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
287 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
288 EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
289 EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
290 EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
291 EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
292 EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
293 EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
294 EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
295 EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
296 EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
297 EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
298 EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
299 EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
300 EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
301 EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
302
303 EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
304 EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");
305
306 EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
307 EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
308 EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
309 EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
310 EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
311 EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
312 EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
313 EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
314 EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
315 EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
316 EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
317 EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
318 EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
319 EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
320 EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
321 EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
322 EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
323 EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
324 EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
325 EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
326 EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
327 EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
328 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
329 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
330 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
331 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
332 EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
333 EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
334 EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
335 EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
336 EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
337 EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
338 EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
339 EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
340 EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
341 EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
342 EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
343 EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
344 EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
345 EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
346 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
347 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
348 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
349 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
350 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
351 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
352 EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
353 EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
354 EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
355 EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
356 EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
357 EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
358
359 EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
360 EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
361 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
362 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
363 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
364 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
365 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
366 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
367 EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
368 EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
369 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
370 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
371 EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
372 EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
373 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
374 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
375 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
376 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
377 EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
378 EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
379 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
380 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
381 EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
382 EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
383 EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
384 EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
385 EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
386 EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
387
388 EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
389 EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
390 EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of an instruction prefix.");
391 EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of an instruction prefix.");
392
393 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
394 EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions sent to IEM in ring-3.");
395 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
396 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
397 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
398 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
399 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
400 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
401 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
402 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
403 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
404 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
405 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
406 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
407 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
408 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
409 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
410 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
411 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
412 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
413 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
414 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
415 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
416 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
417 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
418 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
419
420 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
421 pVCpu->em.s.pCliStatTree = 0;
422
423 /* these should be considered for release statistics. */
424 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
425 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
426 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%d/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
427 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%d/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
428 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%d/EM/HMExecuteCalled", "Number of times emR3HmExecute is called.");
429 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
430 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
431 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%d/EM/NEMEnter", "Profiling NEM entry overhead.");
432#endif /* VBOX_WITH_STATISTICS */
433 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%d/EM/NEMExec", "Profiling NEM execution.");
434 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%d/EM/NEMExecuteCalled", "Number of times emR3NemExecute is called.");
435#ifdef VBOX_WITH_STATISTICS
436 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
437 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
438 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
439 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
440 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
441 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
442#endif /* VBOX_WITH_STATISTICS */
443
444 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
445 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
446 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
447 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
448 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
449
450 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
451 }
452
453 emR3InitDbg(pVM);
454 return VINF_SUCCESS;
455}
456
457
458/**
459 * Applies relocations to data and code managed by this
460 * component. This function will be called at init and
461 * whenever the VMM needs to relocate itself inside the GC.
462 *
463 * @param pVM The cross context VM structure.
464 */
465VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
466{
467 LogFlow(("EMR3Relocate\n"));
468 for (VMCPUID i = 0; i < pVM->cCpus; i++)
469 {
470 PVMCPU pVCpu = &pVM->aCpus[i];
471 if (pVCpu->em.s.pStatsR3)
472 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
473 }
474}
475
476
477/**
478 * Reset the EM state for a CPU.
479 *
480 * Called by EMR3Reset and hot plugging.
481 *
482 * @param pVCpu The cross context virtual CPU structure.
483 */
484VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
485{
486 /* Reset scheduling state. */
487 pVCpu->em.s.fForceRAW = false;
488 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
489
490 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
491 out of the HALTED state here so that enmPrevState doesn't end up as
492 HALTED when EMR3ExecuteVM returns. */
493 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
494 {
495 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
496 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
497 }
498}
499
500
501/**
502 * Reset notification.
503 *
504 * @param pVM The cross context VM structure.
505 */
506VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
507{
508 Log(("EMR3Reset: \n"));
509 for (VMCPUID i = 0; i < pVM->cCpus; i++)
510 EMR3ResetCpu(&pVM->aCpus[i]);
511}
512
513
514/**
515 * Terminates the EM.
516 *
517 * Termination means cleaning up and freeing all resources;
518 * the VM itself is at this point powered off or suspended.
519 *
520 * @returns VBox status code.
521 * @param pVM The cross context VM structure.
522 */
523VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
524{
525 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
526
527#ifdef VBOX_WITH_REM
528 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
529#else
530 RT_NOREF(pVM);
531#endif
532 return VINF_SUCCESS;
533}
534
535
536/**
537 * Execute state save operation.
538 *
539 * @returns VBox status code.
540 * @param pVM The cross context VM structure.
541 * @param pSSM SSM operation handle.
542 */
543static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
544{
545 for (VMCPUID i = 0; i < pVM->cCpus; i++)
546 {
547 PVMCPU pVCpu = &pVM->aCpus[i];
548
549 int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
550 AssertRCReturn(rc, rc);
551
552 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
553 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
554 rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
555 AssertRCReturn(rc, rc);
556
557 /* Save mwait state. */
558 rc = SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
559 AssertRCReturn(rc, rc);
560 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
561 AssertRCReturn(rc, rc);
562 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
563 AssertRCReturn(rc, rc);
564 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
565 AssertRCReturn(rc, rc);
566 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
567 AssertRCReturn(rc, rc);
568 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
569 AssertRCReturn(rc, rc);
570 }
571 return VINF_SUCCESS;
572}
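/* For reference, the per-CPU wire format of the "em" saved-state unit written
 * above is thus (current EM_SAVED_STATE_VERSION):
 *      bool        fForceRAW
 *      uint32_t    enmPrevState
 *      uint32_t    MWait.fWait
 *      RTGCPTR     MWait.uMWaitRAX, uMWaitRCX, uMonitorRAX, uMonitorRCX, uMonitorRDX
 * emR3Load() below additionally accepts the two older (pre-SMP / pre-MWAIT)
 * layouts that omit the trailing fields. */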
573
574
575/**
576 * Execute state load operation.
577 *
578 * @returns VBox status code.
579 * @param pVM The cross context VM structure.
580 * @param pSSM SSM operation handle.
581 * @param uVersion Data layout version.
582 * @param uPass The data pass.
583 */
584static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
585{
586 /*
587 * Validate version.
588 */
589 if ( uVersion > EM_SAVED_STATE_VERSION
590 || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
591 {
592 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
593 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
594 }
595 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
596
597 /*
598 * Load the saved state.
599 */
600 for (VMCPUID i = 0; i < pVM->cCpus; i++)
601 {
602 PVMCPU pVCpu = &pVM->aCpus[i];
603
604 int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
605 if (RT_FAILURE(rc))
606 pVCpu->em.s.fForceRAW = false;
607 AssertRCReturn(rc, rc);
608
609 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
610 {
611 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
612 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
613 AssertRCReturn(rc, rc);
614 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
615
616 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
617 }
618 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
619 {
620 /* Load mwait state. */
621 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
622 AssertRCReturn(rc, rc);
623 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
624 AssertRCReturn(rc, rc);
625 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
626 AssertRCReturn(rc, rc);
627 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
628 AssertRCReturn(rc, rc);
629 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
630 AssertRCReturn(rc, rc);
631 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
632 AssertRCReturn(rc, rc);
633 }
634
635 Assert(!pVCpu->em.s.pCliStatTree);
636 }
637 return VINF_SUCCESS;
638}
639
640
641/**
642 * Argument packet for emR3SetExecutionPolicy.
643 */
644struct EMR3SETEXECPOLICYARGS
645{
646 EMEXECPOLICY enmPolicy;
647 bool fEnforce;
648};
649
650
651/**
652 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
653 */
654static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
655{
656 /*
657 * Only the first CPU changes the variables.
658 */
659 if (pVCpu->idCpu == 0)
660 {
661 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
662 switch (pArgs->enmPolicy)
663 {
664 case EMEXECPOLICY_RECOMPILE_RING0:
665 pVM->fRecompileSupervisor = pArgs->fEnforce;
666 break;
667 case EMEXECPOLICY_RECOMPILE_RING3:
668 pVM->fRecompileUser = pArgs->fEnforce;
669 break;
670 case EMEXECPOLICY_IEM_ALL:
671 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
672 break;
673 default:
674 AssertFailedReturn(VERR_INVALID_PARAMETER);
675 }
676 LogRel(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
677 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
678 }
679
680 /*
681 * Force rescheduling if in RAW, HM, NEM, IEM, IEM-then-REM, or REM.
682 */
683 return pVCpu->em.s.enmState == EMSTATE_RAW
684 || pVCpu->em.s.enmState == EMSTATE_HM
685 || pVCpu->em.s.enmState == EMSTATE_NEM
686 || pVCpu->em.s.enmState == EMSTATE_IEM
687 || pVCpu->em.s.enmState == EMSTATE_REM
688 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
689 ? VINF_EM_RESCHEDULE
690 : VINF_SUCCESS;
691}
692
693
694/**
695 * Changes an execution scheduling policy parameter.
696 *
697 * This is used to enable or disable raw-mode / hardware-virtualization
698 * execution of user and supervisor code.
699 *
700 * @returns VINF_SUCCESS on success.
701 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
702 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
703 *
704 * @param pUVM The user mode VM handle.
705 * @param enmPolicy The scheduling policy to change.
706 * @param fEnforce Whether to enforce the policy or not.
707 */
708VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
709{
710 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
711 VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
712 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
713
714 struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
715 return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
716}
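/* Example usage (illustrative): force everything through the interpreter,
 * e.g. while hunting a suspected divergence between execution engines, and
 * undo it again afterwards:
 *
 *      int rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, true);
 *      AssertRC(rc);
 *      // ... reproduce the problem ...
 *      rc = EMR3SetExecutionPolicy(pUVM, EMEXECPOLICY_IEM_ALL, false);
 */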
717
718
719/**
720 * Queries an execution scheduling policy parameter.
721 *
722 * @returns VBox status code
723 * @param pUVM The user mode VM handle.
724 * @param enmPolicy The scheduling policy to query.
725 * @param pfEnforced Where to return the current value.
726 */
727VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
728{
729 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
730 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
731 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
732 PVM pVM = pUVM->pVM;
733 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
734
735 /* No need to bother EMTs with a query. */
736 switch (enmPolicy)
737 {
738 case EMEXECPOLICY_RECOMPILE_RING0:
739 *pfEnforced = pVM->fRecompileSupervisor;
740 break;
741 case EMEXECPOLICY_RECOMPILE_RING3:
742 *pfEnforced = pVM->fRecompileUser;
743 break;
744 case EMEXECPOLICY_IEM_ALL:
745 *pfEnforced = pVM->em.s.fIemExecutesAll;
746 break;
747 default:
748 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
749 }
750
751 return VINF_SUCCESS;
752}
753
754
755/**
756 * Raise a fatal error.
757 *
758 * Safely terminate the VM with full state report and stuff. This function
759 * will naturally never return.
760 *
761 * @param pVCpu The cross context virtual CPU structure.
762 * @param rc VBox status code.
763 */
764VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
765{
766 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
767 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
768}
769
770
771#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
772/**
773 * Gets the EM state name.
774 *
775 * @returns Pointer to a read-only state name.
776 * @param enmState The state.
777 */
778static const char *emR3GetStateName(EMSTATE enmState)
779{
780 switch (enmState)
781 {
782 case EMSTATE_NONE: return "EMSTATE_NONE";
783 case EMSTATE_RAW: return "EMSTATE_RAW";
784 case EMSTATE_HM: return "EMSTATE_HM";
785 case EMSTATE_IEM: return "EMSTATE_IEM";
786 case EMSTATE_REM: return "EMSTATE_REM";
787 case EMSTATE_HALTED: return "EMSTATE_HALTED";
788 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
789 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
790 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
791 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
792 case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
793 case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
794 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
795 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
796 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
797 case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
798 case EMSTATE_NEM: return "EMSTATE_NEM";
799 case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
800 default: return "Unknown!";
801 }
802}
803#endif /* LOG_ENABLED || VBOX_STRICT */
804
805
806/**
807 * Debug loop.
808 *
809 * @returns VBox status code for EM.
810 * @param pVM The cross context VM structure.
811 * @param pVCpu The cross context virtual CPU structure.
812 * @param rc Current EM VBox status code.
813 */
814static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
815{
816 for (;;)
817 {
818 Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
819 const VBOXSTRICTRC rcLast = rc;
820
821 /*
822 * Debug related RC.
823 */
824 switch (VBOXSTRICTRC_VAL(rc))
825 {
826 /*
827 * Single step an instruction.
828 */
829 case VINF_EM_DBG_STEP:
830 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
831 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
832 || pVCpu->em.s.fForceRAW /* paranoia */)
833#ifdef VBOX_WITH_RAW_MODE
834 rc = emR3RawStep(pVM, pVCpu);
835#else
836 AssertLogRelMsgFailedStmt(("Bad EM state."), rc = VERR_EM_INTERNAL_ERROR);
837#endif
838 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
839 rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
840 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
841 rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
842#ifdef VBOX_WITH_REM
843 else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
844 rc = emR3RemStep(pVM, pVCpu);
845#endif
846 else
847 {
848 rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
849 if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
850 rc = VINF_EM_DBG_STEPPED;
851 }
852 break;
853
854 /*
855 * Simple events: stepped, breakpoint, stop/assertion.
856 */
857 case VINF_EM_DBG_STEPPED:
858 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
859 break;
860
861 case VINF_EM_DBG_BREAKPOINT:
862 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
863 break;
864
865 case VINF_EM_DBG_STOP:
866 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
867 break;
868
869 case VINF_EM_DBG_EVENT:
870 rc = DBGFR3EventHandlePending(pVM, pVCpu);
871 break;
872
873 case VINF_EM_DBG_HYPER_STEPPED:
874 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
875 break;
876
877 case VINF_EM_DBG_HYPER_BREAKPOINT:
878 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
879 break;
880
881 case VINF_EM_DBG_HYPER_ASSERTION:
882 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
883 RTLogFlush(NULL);
884 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
885 break;
886
887 /*
888 * Guru meditation.
889 */
890 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
891 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
892 break;
893 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
894 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
895 break;
896 case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
897 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
898 break;
899
900 default: /** @todo don't use default for guru, but make special error codes! */
901 {
902 LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
903 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
904 break;
905 }
906 }
907
908 /*
909 * Process the result.
910 */
911 switch (VBOXSTRICTRC_VAL(rc))
912 {
913 /*
914 * Continue the debugging loop.
915 */
916 case VINF_EM_DBG_STEP:
917 case VINF_EM_DBG_STOP:
918 case VINF_EM_DBG_EVENT:
919 case VINF_EM_DBG_STEPPED:
920 case VINF_EM_DBG_BREAKPOINT:
921 case VINF_EM_DBG_HYPER_STEPPED:
922 case VINF_EM_DBG_HYPER_BREAKPOINT:
923 case VINF_EM_DBG_HYPER_ASSERTION:
924 break;
925
926 /*
927 * Resuming execution (in some form) has to be done here if we got
928 * a hypervisor debug event.
929 */
930 case VINF_SUCCESS:
931 case VINF_EM_RESUME:
932 case VINF_EM_SUSPEND:
933 case VINF_EM_RESCHEDULE:
934 case VINF_EM_RESCHEDULE_RAW:
935 case VINF_EM_RESCHEDULE_REM:
936 case VINF_EM_HALT:
937 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
938 {
939#ifdef VBOX_WITH_RAW_MODE
940 rc = emR3RawResumeHyper(pVM, pVCpu);
941 if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
942 continue;
943#else
944 AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
945#endif
946 }
947 if (rc == VINF_SUCCESS)
948 rc = VINF_EM_RESCHEDULE;
949 return rc;
950
951 /*
952 * The debugger isn't attached.
953 * We'll simply turn the thing off since that's the easiest thing to do.
954 */
955 case VERR_DBGF_NOT_ATTACHED:
956 switch (VBOXSTRICTRC_VAL(rcLast))
957 {
958 case VINF_EM_DBG_HYPER_STEPPED:
959 case VINF_EM_DBG_HYPER_BREAKPOINT:
960 case VINF_EM_DBG_HYPER_ASSERTION:
961 case VERR_TRPM_PANIC:
962 case VERR_TRPM_DONT_PANIC:
963 case VERR_VMM_RING0_ASSERTION:
964 case VERR_VMM_HYPER_CR3_MISMATCH:
965 case VERR_VMM_RING3_CALL_DISABLED:
966 return rcLast;
967 }
968 return VINF_EM_OFF;
969
970 /*
971 * Status codes terminating the VM in one or another sense.
972 */
973 case VINF_EM_TERMINATE:
974 case VINF_EM_OFF:
975 case VINF_EM_RESET:
976 case VINF_EM_NO_MEMORY:
977 case VINF_EM_RAW_STALE_SELECTOR:
978 case VINF_EM_RAW_IRET_TRAP:
979 case VERR_TRPM_PANIC:
980 case VERR_TRPM_DONT_PANIC:
981 case VERR_IEM_INSTR_NOT_IMPLEMENTED:
982 case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
983 case VERR_VMM_RING0_ASSERTION:
984 case VERR_VMM_HYPER_CR3_MISMATCH:
985 case VERR_VMM_RING3_CALL_DISABLED:
986 case VERR_INTERNAL_ERROR:
987 case VERR_INTERNAL_ERROR_2:
988 case VERR_INTERNAL_ERROR_3:
989 case VERR_INTERNAL_ERROR_4:
990 case VERR_INTERNAL_ERROR_5:
991 case VERR_IPE_UNEXPECTED_STATUS:
992 case VERR_IPE_UNEXPECTED_INFO_STATUS:
993 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
994 return rc;
995
996 /*
997 * The rest is unexpected, and will keep us here.
998 */
999 default:
1000 AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
1001 break;
1002 }
1003 } /* debug forever */
1004}
1005
1006
1007#if defined(VBOX_WITH_REM) || defined(DEBUG)
1008/**
1009 * Steps recompiled code.
1010 *
1011 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
1012 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1013 *
1014 * @param pVM The cross context VM structure.
1015 * @param pVCpu The cross context virtual CPU structure.
1016 */
1017static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
1018{
1019 Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1020
1021# ifdef VBOX_WITH_REM
1022 EMRemLock(pVM);
1023
1024 /*
1025 * Switch to REM, step instruction, switch back.
1026 */
1027 int rc = REMR3State(pVM, pVCpu);
1028 if (RT_SUCCESS(rc))
1029 {
1030 rc = REMR3Step(pVM, pVCpu);
1031 REMR3StateBack(pVM, pVCpu);
1032 }
1033 EMRemUnlock(pVM);
1034
1035# else
1036 int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
1037# endif
1038
1039 Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1040 return rc;
1041}
1042#endif /* VBOX_WITH_REM || DEBUG */
1043
1044
1045#ifdef VBOX_WITH_REM
1046/**
1047 * emR3RemExecute helper that syncs the state back from REM and leave the REM
1048 * critical section.
1049 *
1050 * @returns false - new fInREMState value.
1051 * @param pVM The cross context VM structure.
1052 * @param pVCpu The cross context virtual CPU structure.
1053 */
1054DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1055{
1056 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1057 REMR3StateBack(pVM, pVCpu);
1058 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1059
1060 EMRemUnlock(pVM);
1061 return false;
1062}
1063#endif
1064
1065
1066/**
1067 * Executes recompiled code.
1068 *
1069 * This function contains the recompiler version of the inner
1070 * execution loop (the outer loop being in EMR3ExecuteVM()).
1071 *
1072 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1073 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1074 *
1075 * @param pVM The cross context VM structure.
1076 * @param pVCpu The cross context virtual CPU structure.
1077 * @param pfFFDone Where to store an indicator telling whether or not
1078 * FFs were done before returning.
1079 *
1080 */
1081static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1082{
1083#ifdef LOG_ENABLED
1084 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1085 uint32_t cpl = CPUMGetGuestCPL(pVCpu);
1086
1087 if (pCtx->eflags.Bits.u1VM)
1088 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
1089 else
1090 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
1091#endif
1092 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
1093
1094#if defined(VBOX_STRICT) && defined(DEBUG_bird)
1095 AssertMsg( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1096 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
1097 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
1098#endif
1099
1100 /*
 1101     * Spin until we get a forced action that returns anything but VINF_SUCCESS,
 1102     * or until REM suggests raw-mode execution.
1103 */
1104 *pfFFDone = false;
1105#ifdef VBOX_WITH_REM
1106 bool fInREMState = false;
1107#else
1108 uint32_t cLoops = 0;
1109#endif
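     /* Note: in builds without VBOX_WITH_REM this "recompiled" loop really
        runs batches of instructions in IEM, and cLoops is used further down
        to periodically probe for a reschedule back to a faster mode. */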
1110 int rc = VINF_SUCCESS;
1111 for (;;)
1112 {
1113#ifdef VBOX_WITH_REM
1114 /*
1115 * Lock REM and update the state if not already in sync.
1116 *
1117 * Note! Big lock, but you are not supposed to own any lock when
1118 * coming in here.
1119 */
1120 if (!fInREMState)
1121 {
1122 EMRemLock(pVM);
1123 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
1124
 1125            /* Flush the recompiler translation blocks if the VCPU has changed;
 1126               also force a full CPU state resync. */
1127 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
1128 {
1129 REMFlushTBs(pVM);
1130 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
1131 }
1132 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
1133
1134 rc = REMR3State(pVM, pVCpu);
1135
1136 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
1137 if (RT_FAILURE(rc))
1138 break;
1139 fInREMState = true;
1140
1141 /*
1142 * We might have missed the raising of VMREQ, TIMER and some other
1143 * important FFs while we were busy switching the state. So, check again.
1144 */
1145 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
1146 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
1147 {
1148 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
1149 goto l_REMDoForcedActions;
1150 }
1151 }
1152#endif
1153
1154 /*
1155 * Execute REM.
1156 */
1157 if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
1158 {
1159 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
1160#ifdef VBOX_WITH_REM
1161 rc = REMR3Run(pVM, pVCpu);
1162#else
1163 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
1164#endif
1165 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
1166 }
1167 else
1168 {
1169 /* Give up this time slice; virtual time continues */
1170 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
1171 RTThreadSleep(5);
1172 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
1173 rc = VINF_SUCCESS;
1174 }
1175
1176 /*
1177 * Deal with high priority post execution FFs before doing anything
1178 * else. Sync back the state and leave the lock to be on the safe side.
1179 */
1180 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1181 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1182 {
1183#ifdef VBOX_WITH_REM
1184 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1185#endif
1186 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1187 }
1188
1189 /*
1190 * Process the returned status code.
1191 */
1192 if (rc != VINF_SUCCESS)
1193 {
1194 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1195 break;
1196 if (rc != VINF_REM_INTERRUPED_FF)
1197 {
1198#ifndef VBOX_WITH_REM
 1199                /* Try to dodge unimplemented IEM trouble by rescheduling. */
1200 if ( rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1201 || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1202 {
1203 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1204 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1205 {
1206 rc = VINF_EM_RESCHEDULE;
1207 break;
1208 }
1209 }
1210#endif
1211
1212 /*
1213 * Anything which is not known to us means an internal error
1214 * and the termination of the VM!
1215 */
1216 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
1217 break;
1218 }
1219 }
1220
1221
1222 /*
1223 * Check and execute forced actions.
1224 *
 1225         * Sync back the VM state and leave the lock before calling any of
 1226         * these; you never know what's going to happen here.
1227 */
1228#ifdef VBOX_HIGH_RES_TIMERS_HACK
1229 TMTimerPollVoid(pVM, pVCpu);
1230#endif
1231 AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
1232 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1233 || VMCPU_FF_IS_PENDING(pVCpu,
1234 VMCPU_FF_ALL_REM_MASK
1235 & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
1236 {
1237#ifdef VBOX_WITH_REM
1238l_REMDoForcedActions:
1239 if (fInREMState)
1240 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1241#endif
1242 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
1243 rc = emR3ForcedActions(pVM, pVCpu, rc);
1244 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
1245 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1246 if ( rc != VINF_SUCCESS
1247 && rc != VINF_EM_RESCHEDULE_REM)
1248 {
1249 *pfFFDone = true;
1250 break;
1251 }
1252 }
1253
1254#ifndef VBOX_WITH_REM
1255 /*
 1256         * Every eighth iteration, check whether we can get back to fast execution mode.
1257 */
1258 if (!(++cLoops & 7))
1259 {
1260 EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1261 if ( enmCheck != EMSTATE_REM
1262 && enmCheck != EMSTATE_IEM_THEN_REM)
1263 return VINF_EM_RESCHEDULE;
1264 }
1265#endif
1266
1267 } /* The Inner Loop, recompiled execution mode version. */
1268
1269
1270#ifdef VBOX_WITH_REM
1271 /*
1272 * Returning. Sync back the VM state if required.
1273 */
1274 if (fInREMState)
1275 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1276#endif
1277
1278 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1279 return rc;
1280}
1281
1282
1283#ifdef DEBUG
1284
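     /**
      * Steps recompiled code cIterations times for debugging purposes,
      * disassembling and logging each instruction before stepping it.
      *
      * Stops early if rescheduling away from REM is indicated; clears
      * EFLAGS.TF before returning.
      *
      * @returns VINF_EM_RESCHEDULE.
      * @param   pVM         The cross context VM structure.
      * @param   pVCpu       The cross context virtual CPU structure.
      * @param   cIterations The number of single steps to perform.
      */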
1285int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1286{
1287 EMSTATE enmOldState = pVCpu->em.s.enmState;
1288
1289 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1290
1291 Log(("Single step BEGIN:\n"));
1292 for (uint32_t i = 0; i < cIterations; i++)
1293 {
1294 DBGFR3PrgStep(pVCpu);
1295 DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
1296 emR3RemStep(pVM, pVCpu);
1297 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1298 break;
1299 }
1300 Log(("Single step END:\n"));
1301 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1302 pVCpu->em.s.enmState = enmOldState;
1303 return VINF_EM_RESCHEDULE;
1304}
1305
1306#endif /* DEBUG */
1307
1308
1309/**
 1310 * Try to execute the problematic code in IEM first, then fall back on REM if there
1311 * is too much of it or if IEM doesn't implement something.
1312 *
1313 * @returns Strict VBox status code from IEMExecLots.
1314 * @param pVM The cross context VM structure.
1315 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1316 * @param pfFFDone Force flags done indicator.
1317 *
1318 * @thread EMT(pVCpu)
1319 */
1320static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1321{
1322 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1323 *pfFFDone = false;
1324
1325 /*
1326 * Execute in IEM for a while.
1327 */
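     /* Budget: at most 1024 instructions in IEM before we switch to REM for
        real; after each IEMExecLots batch we check whether rescheduling or
        pending forced actions let us leave early. */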
1328 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1329 {
1330 uint32_t cInstructions;
1331 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
1332 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1333 if (rcStrict != VINF_SUCCESS)
1334 {
1335 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1336 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1337 break;
1338
1339 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1340 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1341 return rcStrict;
1342 }
1343
1344 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1345 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1346 {
1347 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1348 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1349 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1350 pVCpu->em.s.enmState = enmNewState;
1351 return VINF_SUCCESS;
1352 }
1353
1354 /*
1355 * Check for pending actions.
1356 */
1357 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1358 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1359 return VINF_SUCCESS;
1360 }
1361
1362 /*
1363 * Switch to REM.
1364 */
1365 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1366 pVCpu->em.s.enmState = EMSTATE_REM;
1367 return VINF_SUCCESS;
1368}
1369
1370
1371/**
 1372 * Decides whether to execute raw-mode, hardware accelerated (HM/NEM) or recompiled (REM) code.
1373 *
1374 * @returns new EM state
1375 * @param pVM The cross context VM structure.
1376 * @param pVCpu The cross context virtual CPU structure.
1377 * @param pCtx Pointer to the guest CPU context.
1378 */
1379EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1380{
1381 /*
1382 * When forcing raw-mode execution, things are simple.
1383 */
1384 if (pVCpu->em.s.fForceRAW)
1385 return EMSTATE_RAW;
1386
1387 /*
1388 * We stay in the wait for SIPI state unless explicitly told otherwise.
1389 */
1390 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1391 return EMSTATE_WAIT_SIPI;
1392
1393 /*
1394 * Execute everything in IEM?
1395 */
1396 if (pVM->em.s.fIemExecutesAll)
1397 return EMSTATE_IEM;
1398
1399 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1400 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1401 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1402
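     /* Rough decision chain from here on: prefer HM or NEM when hardware
        assisted execution is possible, fall back to IEM-then-REM when
        raw-mode is disabled, and otherwise run the raw-mode suitability
        checks below. */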
1403 X86EFLAGS EFlags = pCtx->eflags;
1404 if (!VM_IS_RAW_MODE_ENABLED(pVM))
1405 {
1406 if (EMIsHwVirtExecutionEnabled(pVM))
1407 {
1408 if (VM_IS_HM_ENABLED(pVM))
1409 {
1410 if (HMR3CanExecuteGuest(pVM, pCtx))
1411 return EMSTATE_HM;
1412 }
1413 else if (NEMR3CanExecuteGuest(pVM, pVCpu, pCtx))
1414 return EMSTATE_NEM;
1415
1416 /*
1417 * Note! Raw mode and hw accelerated mode are incompatible. The latter
1418 * turns off monitoring features essential for raw mode!
1419 */
1420 return EMSTATE_IEM_THEN_REM;
1421 }
1422 }
1423
1424 /*
1425 * Standard raw-mode:
1426 *
 1427     * Here we only support 16 and 32-bit protected mode ring-3 code with no I/O privileges,
 1428     * or 32-bit protected mode ring-0 code.
1429 *
1430 * The tests are ordered by the likelihood of being true during normal execution.
1431 */
1432 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1433 {
1434 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1435 return EMSTATE_REM;
1436 }
1437
1438# ifndef VBOX_RAW_V86
1439 if (EFlags.u32 & X86_EFL_VM) {
1440 Log2(("raw mode refused: VM_MASK\n"));
1441 return EMSTATE_REM;
1442 }
1443# endif
1444
 1445    /** @todo check the X86_CR0_AM flag with respect to raw mode!!! We're probably not emulating it right! */
1446 uint32_t u32CR0 = pCtx->cr0;
1447 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1448 {
1449 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1450 return EMSTATE_REM;
1451 }
1452
1453 if (pCtx->cr4 & X86_CR4_PAE)
1454 {
1455 uint32_t u32Dummy, u32Features;
1456
1457 CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1458 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1459 return EMSTATE_REM;
1460 }
1461
1462 unsigned uSS = pCtx->ss.Sel;
1463 if ( pCtx->eflags.Bits.u1VM
1464 || (uSS & X86_SEL_RPL) == 3)
1465 {
1466 if (!EMIsRawRing3Enabled(pVM))
1467 return EMSTATE_REM;
1468
1469 if (!(EFlags.u32 & X86_EFL_IF))
1470 {
1471 Log2(("raw mode refused: IF (RawR3)\n"));
1472 return EMSTATE_REM;
1473 }
1474
1475 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1476 {
1477 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1478 return EMSTATE_REM;
1479 }
1480 }
1481 else
1482 {
1483 if (!EMIsRawRing0Enabled(pVM))
1484 return EMSTATE_REM;
1485
1486 if (EMIsRawRing1Enabled(pVM))
1487 {
1488 /* Only ring 0 and 1 supervisor code. */
1489 if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
1490 {
1491 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1492 return EMSTATE_REM;
1493 }
1494 }
1495 /* Only ring 0 supervisor code. */
1496 else if ((uSS & X86_SEL_RPL) != 0)
1497 {
1498 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1499 return EMSTATE_REM;
1500 }
1501
 1502        // Let's start with pure 32-bit ring-0 code first
1503 /** @todo What's pure 32-bit mode? flat? */
1504 if ( !(pCtx->ss.Attr.n.u1DefBig)
1505 || !(pCtx->cs.Attr.n.u1DefBig))
1506 {
1507 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1508 return EMSTATE_REM;
1509 }
1510
1511 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1512 if (!(u32CR0 & X86_CR0_WP))
1513 {
1514 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1515 return EMSTATE_REM;
1516 }
1517
1518# ifdef VBOX_WITH_RAW_MODE
1519 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1520 {
1521 Log2(("raw r0 mode forced: patch code\n"));
1522# ifdef VBOX_WITH_SAFE_STR
1523 Assert(pCtx->tr.Sel);
1524# endif
1525 return EMSTATE_RAW;
1526 }
1527# endif /* VBOX_WITH_RAW_MODE */
1528
1529# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1530 if (!(EFlags.u32 & X86_EFL_IF))
1531 {
1532 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1533 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1534 return EMSTATE_REM;
1535 }
1536# endif
1537
1538# ifndef VBOX_WITH_RAW_RING1
1539 /** @todo still necessary??? */
1540 if (EFlags.Bits.u2IOPL != 0)
1541 {
1542 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1543 return EMSTATE_REM;
1544 }
1545# endif
1546 }
1547
1548 /*
 1549     * Stale hidden selectors mean raw-mode is unsafe (being very careful).
1550 */
1551 if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
1552 {
1553 Log2(("raw mode refused: stale CS\n"));
1554 return EMSTATE_REM;
1555 }
1556 if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
1557 {
1558 Log2(("raw mode refused: stale SS\n"));
1559 return EMSTATE_REM;
1560 }
1561 if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
1562 {
1563 Log2(("raw mode refused: stale DS\n"));
1564 return EMSTATE_REM;
1565 }
1566 if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
1567 {
1568 Log2(("raw mode refused: stale ES\n"));
1569 return EMSTATE_REM;
1570 }
1571 if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
1572 {
1573 Log2(("raw mode refused: stale FS\n"));
1574 return EMSTATE_REM;
1575 }
1576 if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
1577 {
1578 Log2(("raw mode refused: stale GS\n"));
1579 return EMSTATE_REM;
1580 }
1581
1582# ifdef VBOX_WITH_SAFE_STR
1583 if (pCtx->tr.Sel == 0)
1584 {
1585 Log(("Raw mode refused -> TR=0\n"));
1586 return EMSTATE_REM;
1587 }
1588# endif
1589
1590 /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
1591 return EMSTATE_RAW;
1592}
1593
1594
1595/**
1596 * Executes all high priority post execution force actions.
1597 *
1598 * @returns rc or a fatal status code.
1599 *
1600 * @param pVM The cross context VM structure.
1601 * @param pVCpu The cross context virtual CPU structure.
1602 * @param rc The current rc.
1603 */
1604int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1605{
1606 VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1607
1608 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1609 PDMCritSectBothFF(pVCpu);
1610
1611 /* Update CR3 (Nested Paging case for HM). */
1612 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
1613 {
1614 int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
1615 if (RT_FAILURE(rc2))
1616 return rc2;
1617 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
1618 }
1619
1620 /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
1621 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
1622 {
1623 if (CPUMIsGuestInPAEMode(pVCpu))
1624 {
1625 PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
1626 AssertPtr(pPdpes);
1627
1628 PGMGstUpdatePaePdpes(pVCpu, pPdpes);
1629 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
1630 }
1631 else
1632 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
1633 }
1634
1635 /* IEM has pending work (typically memory write after INS instruction). */
1636 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
1637 rc = VBOXSTRICTRC_TODO(IEMR3ProcessForceFlag(pVM, pVCpu, rc));
1638
 1639    /* IOM has pending work (committing an I/O or MMIO write). */
1640 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
1641 rc = VBOXSTRICTRC_TODO(IOMR3ProcessForceFlag(pVM, pVCpu, rc));
1642
1643#ifdef VBOX_WITH_RAW_MODE
1644 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1645 CSAMR3DoPendingAction(pVM, pVCpu);
1646#endif
1647
1648 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1649 {
1650 if ( rc > VINF_EM_NO_MEMORY
1651 && rc <= VINF_EM_LAST)
1652 rc = VINF_EM_NO_MEMORY;
1653 }
1654
1655 return rc;
1656}
1657
1658#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
1659/**
1660 * Helper for emR3ForcedActions() for injecting interrupts into the
1661 * nested-guest.
1662 *
1663 * @returns VBox status code.
1664 * @param pVCpu The cross context virtual CPU structure.
1665 * @param pCtx Pointer to the nested-guest CPU context.
1666 * @param pfResched Where to store whether a reschedule is required.
1667 * @param pfInject Where to store whether an interrupt was injected (and if
1668 * a wake up is pending).
1669 */
1670static int emR3NstGstInjectIntr(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfResched, bool *pfInject)
1671{
1672 *pfResched = false;
1673 *pfInject = false;
1674 if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
1675 {
1676 PVM pVM = pVCpu->CTX_SUFF(pVM);
1677 Assert(pCtx->hwvirt.fGif);
1678 bool fVirtualGif = CPUMGetSvmNstGstVGif(pCtx);
1679#ifdef VBOX_WITH_RAW_MODE
1680 fVirtualGif &= !PATMIsPatchGCAddr(pVM, pCtx->eip);
1681#endif
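         /* Interrupt injection is only considered while the (virtual) global
            interrupt flag is set; with AMD-V the nested hypervisor gates
            interrupt delivery through GIF/V_GIF. */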
1682 if (fVirtualGif)
1683 {
1684 if (CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
1685 {
1686 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1687 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1688 {
1689 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
1690 {
1691 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
1692 if (RT_SUCCESS(rcStrict))
1693 {
1694 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1695 * doesn't intercept HLT but intercepts INTR? */
1696 *pfResched = true;
1697 return VINF_EM_RESCHEDULE;
1698 }
1699
1700 AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1701 return VINF_EM_TRIPLE_FAULT;
1702 }
1703
1704 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1705 /** @todo this really isn't nice, should properly handle this */
1706 int rc = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1707 if (pVM->em.s.fIemExecutesAll && ( rc == VINF_EM_RESCHEDULE_REM
1708 || rc == VINF_EM_RESCHEDULE_HM
1709 || rc == VINF_EM_RESCHEDULE_RAW))
1710 {
1711 rc = VINF_EM_RESCHEDULE;
1712 }
1713
1714 *pfResched = true;
1715 *pfInject = true;
1716 return rc;
1717 }
1718 }
1719
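             /* Next, virtual interrupts (VINTR): either intercepted by the
                nested hypervisor or injected here with the vector taken from
                the nested-guest VMCB. */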
1720 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
1721 && CPUMCanSvmNstGstTakeVirtIntr(pVCpu, pCtx))
1722 {
1723 if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
1724 {
1725 VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
1726 if (RT_SUCCESS(rcStrict))
1727 {
1728 /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
1729 * doesn't intercept HLT but intercepts VINTR? */
1730 *pfResched = true;
1731 return VINF_EM_RESCHEDULE;
1732 }
1733
1734 AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
1735 return VINF_EM_TRIPLE_FAULT;
1736 }
1737
1738 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
1739 uint8_t const uNstGstVector = CPUMGetSvmNstGstInterrupt(pCtx);
1740 AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR vector %#x\n", uNstGstVector));
1741 TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
1742 Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));
1743
1744 *pfResched = true;
1745 *pfInject = true;
1746 return VINF_EM_RESCHEDULE;
1747 }
1748 }
1749 return VINF_SUCCESS;
1750 }
1751
1752 if (CPUMIsGuestInVmxNestedHwVirtMode(pCtx))
1753 { /** @todo Nested VMX. */ }
1754
1755 /* Shouldn't really get here. */
1756 AssertMsgFailed(("Unrecognized nested hwvirt. arch!\n"));
1757 return VERR_EM_INTERNAL_ERROR;
1758}
1759#endif
1760
1761/**
1762 * Executes all pending forced actions.
1763 *
1764 * Forced actions can cause execution delays and execution
1765 * rescheduling. The first we deal with using action priority, so
 1766 * that for instance pending timers aren't scheduled and run until
1767 * right before execution. The rescheduling we deal with using
1768 * return codes. The same goes for VM termination, only in that case
1769 * we exit everything.
1770 *
1771 * @returns VBox status code of equal or greater importance/severity than rc.
1772 * The most important ones are: VINF_EM_RESCHEDULE,
1773 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1774 *
1775 * @param pVM The cross context VM structure.
1776 * @param pVCpu The cross context virtual CPU structure.
1777 * @param rc The current rc.
1778 *
1779 */
1780int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1781{
1782 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1783#ifdef VBOX_STRICT
1784 int rcIrq = VINF_SUCCESS;
1785#endif
1786 int rc2;
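     /* UPDATE_RC() merges rc2 into rc, keeping whichever EM status is the
        more important one; within VINF_EM_FIRST..VINF_EM_LAST lower values
        take precedence (hence the rc2 < rc test). */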
1787#define UPDATE_RC() \
1788 do { \
1789 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1790 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1791 break; \
1792 if (!rc || rc2 < rc) \
1793 rc = rc2; \
1794 } while (0)
1795 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1796
1797 /*
1798 * Post execution chunk first.
1799 */
1800 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1801 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1802 {
1803 /*
1804 * EMT Rendezvous (must be serviced before termination).
1805 */
1806 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1807 {
1808 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1809 UPDATE_RC();
1810 /** @todo HACK ALERT! The following test is to make sure EM+TM
1811 * thinks the VM is stopped/reset before the next VM state change
1812 * is made. We need a better solution for this, or at least make it
1813 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1814 * VINF_EM_SUSPEND). */
1815 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1816 {
1817 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1818 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1819 return rc;
1820 }
1821 }
1822
1823 /*
1824 * State change request (cleared by vmR3SetStateLocked).
1825 */
1826 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
1827 {
1828 VMSTATE enmState = VMR3GetState(pVM);
1829 switch (enmState)
1830 {
1831 case VMSTATE_FATAL_ERROR:
1832 case VMSTATE_FATAL_ERROR_LS:
1833 case VMSTATE_GURU_MEDITATION:
1834 case VMSTATE_GURU_MEDITATION_LS:
1835 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1836 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1837 return VINF_EM_SUSPEND;
1838
1839 case VMSTATE_DESTROYING:
1840 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1841 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1842 return VINF_EM_TERMINATE;
1843
1844 default:
1845 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1846 }
1847 }
1848
1849 /*
1850 * Debugger Facility polling.
1851 */
1852 if ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
1853 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
1854 {
1855 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
1856 UPDATE_RC();
1857 }
1858
1859 /*
1860 * Postponed reset request.
1861 */
1862 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
1863 {
1864 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
1865 UPDATE_RC();
1866 }
1867
1868#ifdef VBOX_WITH_RAW_MODE
1869 /*
1870 * CSAM page scanning.
1871 */
1872 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1873 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1874 {
1875 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1876
1877 /** @todo check for 16 or 32 bits code! (D bit in the code selector) */
1878 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1879
1880 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
1881 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1882 }
1883#endif
1884
1885 /*
1886 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1887 */
1888 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
1889 {
1890 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1891 UPDATE_RC();
1892 if (rc == VINF_EM_NO_MEMORY)
1893 return rc;
1894 }
1895
1896 /* check that we got them all */
1897 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1898 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
1899 }
1900
1901 /*
1902 * Normal priority then.
1903 * (Executed in no particular order.)
1904 */
1905 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1906 {
1907 /*
1908 * PDM Queues are pending.
1909 */
1910 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1911 PDMR3QueueFlushAll(pVM);
1912
1913 /*
1914 * PDM DMA transfers are pending.
1915 */
1916 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1917 PDMR3DmaRun(pVM);
1918
1919 /*
1920 * EMT Rendezvous (make sure they are handled before the requests).
1921 */
1922 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1923 {
1924 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1925 UPDATE_RC();
1926 /** @todo HACK ALERT! The following test is to make sure EM+TM
1927 * thinks the VM is stopped/reset before the next VM state change
1928 * is made. We need a better solution for this, or at least make it
1929 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1930 * VINF_EM_SUSPEND). */
1931 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1932 {
1933 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1934 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1935 return rc;
1936 }
1937 }
1938
1939 /*
1940 * Requests from other threads.
1941 */
1942 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1943 {
1944 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
1945 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1946 {
1947 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1948 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1949 return rc2;
1950 }
1951 UPDATE_RC();
1952 /** @todo HACK ALERT! The following test is to make sure EM+TM
1953 * thinks the VM is stopped/reset before the next VM state change
1954 * is made. We need a better solution for this, or at least make it
1955 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1956 * VINF_EM_SUSPEND). */
1957 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1958 {
1959 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1960 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1961 return rc;
1962 }
1963 }
1964
1965#ifdef VBOX_WITH_REM
1966 /* Replay the handler notification changes. */
1967 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1968 {
1969 /* Try not to cause deadlocks. */
1970 if ( pVM->cCpus == 1
1971 || ( !PGMIsLockOwner(pVM)
1972 && !IOMIsLockWriteOwner(pVM))
1973 )
1974 {
1975 EMRemLock(pVM);
1976 REMR3ReplayHandlerNotifications(pVM);
1977 EMRemUnlock(pVM);
1978 }
1979 }
1980#endif
1981
1982 /* check that we got them all */
1983 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1984 }
1985
1986 /*
1987 * Normal priority then. (per-VCPU)
1988 * (Executed in no particular order.)
1989 */
1990 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
1991 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1992 {
1993 /*
1994 * Requests from other threads.
1995 */
1996 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
1997 {
1998 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
1999 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
2000 {
2001 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2002 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2003 return rc2;
2004 }
2005 UPDATE_RC();
2006 /** @todo HACK ALERT! The following test is to make sure EM+TM
2007 * thinks the VM is stopped/reset before the next VM state change
2008 * is made. We need a better solution for this, or at least make it
2009 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2010 * VINF_EM_SUSPEND). */
2011 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2012 {
2013 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2014 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2015 return rc;
2016 }
2017 }
2018
2019 /* check that we got them all */
2020 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
2021 }
2022
2023 /*
2024 * High priority pre execution chunk last.
2025 * (Executed in ascending priority order.)
2026 */
2027 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
2028 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
2029 {
2030 /*
2031 * Timers before interrupts.
2032 */
2033 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
2034 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2035 TMR3TimerQueuesDo(pVM);
2036
2037 /*
2038 * Pick up asynchronously posted interrupts into the APIC.
2039 */
2040 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2041 APICUpdatePendingInterrupts(pVCpu);
2042
2043 /*
2044 * The instruction following an emulated STI should *always* be executed!
2045 *
2046 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
2047 * the eip is the same as the inhibited instr address. Before we
2048 * are able to execute this instruction in raw mode (iret to
2049 * guest code) an external interrupt might force a world switch
2050 * again. Possibly allowing a guest interrupt to be dispatched
2051 * in the process. This could break the guest. Sounds very
 2052         * unlikely, but such timing sensitive problems are not as rare as
2053 * you might think.
2054 */
2055 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2056 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2057 {
2058 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
2059 {
2060 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
2061 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2062 }
2063 else
2064 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
2065 }
2066
2067 /*
2068 * Interrupts.
2069 */
2070 bool fWakeupPending = false;
2071 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2072 && (!rc || rc >= VINF_EM_RESCHEDULE_HM))
2073 {
2074 if ( !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2075 && !TRPMHasTrap(pVCpu)) /* an interrupt could already be scheduled for dispatching in the recompiler. */
2076 {
2077 Assert(!HMR3IsEventPending(pVCpu));
2078 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2079#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2080 if (CPUMIsGuestInNestedHwVirtMode(pCtx))
2081 {
2082 bool fResched, fInject;
2083 rc2 = emR3NstGstInjectIntr(pVCpu, pCtx, &fResched, &fInject);
2084 if (fInject)
2085 {
2086 fWakeupPending = true;
2087#ifdef VBOX_STRICT
2088 rcIrq = rc2;
2089#endif
2090 }
2091 if (fResched)
2092 UPDATE_RC();
2093 }
2094 else
2095#endif
2096 {
2097 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2098#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2099 && pCtx->hwvirt.fGif
2100#endif
2101#ifdef VBOX_WITH_RAW_MODE
2102 && !PATMIsPatchGCAddr(pVM, pCtx->eip)
2103#endif
2104 && pCtx->eflags.Bits.u1IF)
2105 {
2106 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2107 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
2108 /** @todo this really isn't nice, should properly handle this */
2109 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
 2110                    Log(("EM: TRPMR3InjectEvent -> %d\n", rc2));
2111 if (pVM->em.s.fIemExecutesAll && ( rc2 == VINF_EM_RESCHEDULE_REM
2112 || rc2 == VINF_EM_RESCHEDULE_HM
2113 || rc2 == VINF_EM_RESCHEDULE_RAW))
2114 {
2115 rc2 = VINF_EM_RESCHEDULE;
2116 }
2117#ifdef VBOX_STRICT
2118 rcIrq = rc2;
2119#endif
2120 UPDATE_RC();
2121 /* Reschedule required: We must not miss the wakeup below! */
2122 fWakeupPending = true;
2123 }
2124 }
2125 }
2126 }
2127
2128 /*
2129 * Allocate handy pages.
2130 */
2131 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2132 {
2133 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2134 UPDATE_RC();
2135 }
2136
2137 /*
2138 * Debugger Facility request.
2139 */
2140 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2141 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2142 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
2143 {
2144 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2145 UPDATE_RC();
2146 }
2147
2148 /*
2149 * EMT Rendezvous (must be serviced before termination).
2150 */
2151 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2152 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2153 {
2154 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2155 UPDATE_RC();
2156 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2157 * stopped/reset before the next VM state change is made. We need a better
2158 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2159 * && rc >= VINF_EM_SUSPEND). */
2160 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2161 {
2162 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2163 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2164 return rc;
2165 }
2166 }
2167
2168 /*
2169 * State change request (cleared by vmR3SetStateLocked).
2170 */
2171 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2172 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2173 {
2174 VMSTATE enmState = VMR3GetState(pVM);
2175 switch (enmState)
2176 {
2177 case VMSTATE_FATAL_ERROR:
2178 case VMSTATE_FATAL_ERROR_LS:
2179 case VMSTATE_GURU_MEDITATION:
2180 case VMSTATE_GURU_MEDITATION_LS:
2181 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2182 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2183 return VINF_EM_SUSPEND;
2184
2185 case VMSTATE_DESTROYING:
2186 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2187 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2188 return VINF_EM_TERMINATE;
2189
2190 default:
2191 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2192 }
2193 }
2194
2195 /*
2196 * Out of memory? Since most of our fellow high priority actions may cause us
2197 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2198 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2199 * than us since we can terminate without allocating more memory.
2200 */
2201 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2202 {
2203 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2204 UPDATE_RC();
2205 if (rc == VINF_EM_NO_MEMORY)
2206 return rc;
2207 }
2208
2209 /*
2210 * If the virtual sync clock is still stopped, make TM restart it.
2211 */
2212 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2213 TMR3VirtualSyncFF(pVM, pVCpu);
2214
2215#ifdef DEBUG
2216 /*
2217 * Debug, pause the VM.
2218 */
2219 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2220 {
2221 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2222 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2223 return VINF_EM_SUSPEND;
2224 }
2225#endif
2226
2227 /* check that we got them all */
2228 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2229 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2230 }
2231
2232#undef UPDATE_RC
2233 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2234 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2235 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2236 return rc;
2237}
2238
2239
2240/**
2241 * Check if the preset execution time cap restricts guest execution scheduling.
2242 *
2243 * @returns true if allowed, false otherwise
2244 * @param pVM The cross context VM structure.
2245 * @param pVCpu The cross context virtual CPU structure.
2246 */
2247bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2248{
2249 uint64_t u64UserTime, u64KernelTime;
2250
2251 if ( pVM->uCpuExecutionCap != 100
2252 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2253 {
2254 uint64_t u64TimeNow = RTTimeMilliTS();
2255 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2256 {
2257 /* New time slice. */
2258 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2259 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2260 pVCpu->em.s.u64TimeSliceExec = 0;
2261 }
2262 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
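         /* Example: with EM_TIME_SLICE at 100 ms and uCpuExecutionCap at 50,
            at most 50 ms of combined kernel+user time may be consumed per
            slice before we tell the caller to back off. */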
2263
2264 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2265 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2266 return false;
2267 }
2268 return true;
2269}
2270
2271
2272/**
2273 * Execute VM.
2274 *
2275 * This function is the main loop of the VM. The emulation thread
2276 * calls this function when the VM has been successfully constructed
2277 * and we're ready for executing the VM.
 2278 * and we're ready to execute the VM.
2279 * Returning from this function means that the VM is turned off or
2280 * suspended (state already saved) and deconstruction is next in line.
2281 *
 2282 * All interaction from other threads is done using forced actions
2283 * and signaling of the wait object.
2284 *
 2285 * @returns VBox status code; informational status codes may indicate failure.
2286 * @param pVM The cross context VM structure.
2287 * @param pVCpu The cross context virtual CPU structure.
2288 */
2289VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
2290{
2291 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
2292 pVM,
2293 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
2294 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
2295 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
2296 pVCpu->em.s.fForceRAW));
2297 VM_ASSERT_EMT(pVM);
2298 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
2299 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
2300 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
2301 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2302
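     /* Fatal errors deep in the execution engines longjmp back to this point
        (via em.s.u.FatalLongJump); rc then holds the status that is logged
        and returned by the else branch at the bottom of this function. */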
2303 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
2304 if (rc == 0)
2305 {
2306 /*
2307 * Start the virtual time.
2308 */
2309 TMR3NotifyResume(pVM, pVCpu);
2310
2311 /*
2312 * The Outer Main Loop.
2313 */
2314 bool fFFDone = false;
2315
2316 /* Reschedule right away to start in the right state. */
2317 rc = VINF_SUCCESS;
2318
2319 /* If resuming after a pause or a state load, restore the previous
2320 state or else we'll start executing code. Else, just reschedule. */
2321 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
2322 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2323 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
2324 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2325 else
2326 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2327 pVCpu->em.s.cIemThenRemInstructions = 0;
2328 Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
2329
2330 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2331 for (;;)
2332 {
2333 /*
2334 * Before we can schedule anything (we're here because
2335 * scheduling is required) we must service any pending
2336 * forced actions to avoid any pending action causing
 2337             * immediate rescheduling upon entering an inner loop.
2338 *
2339 * Do forced actions.
2340 */
2341 if ( !fFFDone
2342 && RT_SUCCESS(rc)
2343 && rc != VINF_EM_TERMINATE
2344 && rc != VINF_EM_OFF
2345 && ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
2346 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
2347 {
2348 rc = emR3ForcedActions(pVM, pVCpu, rc);
2349 VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
2350 if ( ( rc == VINF_EM_RESCHEDULE_REM
2351 || rc == VINF_EM_RESCHEDULE_HM)
2352 && pVCpu->em.s.fForceRAW)
2353 rc = VINF_EM_RESCHEDULE_RAW;
2354 }
2355 else if (fFFDone)
2356 fFFDone = false;
2357
2358 /*
2359 * Now what to do?
2360 */
2361 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
2362 EMSTATE const enmOldState = pVCpu->em.s.enmState;
2363 switch (rc)
2364 {
2365 /*
2366 * Keep doing what we're currently doing.
2367 */
2368 case VINF_SUCCESS:
2369 break;
2370
2371 /*
2372 * Reschedule - to raw-mode execution.
2373 */
2374/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
2375 case VINF_EM_RESCHEDULE_RAW:
2376 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2377 if (VM_IS_RAW_MODE_ENABLED(pVM))
2378 {
2379 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
2380 pVCpu->em.s.enmState = EMSTATE_RAW;
2381 }
2382 else
2383 {
2384 AssertLogRelFailed();
2385 pVCpu->em.s.enmState = EMSTATE_NONE;
2386 }
2387 break;
2388
2389 /*
2390 * Reschedule - to HM or NEM.
2391 */
2392 case VINF_EM_RESCHEDULE_HM:
2393 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2394 Assert(!pVCpu->em.s.fForceRAW);
2395 if (VM_IS_HM_ENABLED(pVM))
2396 {
2397 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
2398 pVCpu->em.s.enmState = EMSTATE_HM;
2399 }
2400 else if (VM_IS_NEM_ENABLED(pVM))
2401 {
2402 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
2403 pVCpu->em.s.enmState = EMSTATE_NEM;
2404 }
2405 else
2406 {
2407 AssertLogRelFailed();
2408 pVCpu->em.s.enmState = EMSTATE_NONE;
2409 }
2410 break;
2411
2412 /*
2413 * Reschedule - to recompiled execution.
2414 */
2415 case VINF_EM_RESCHEDULE_REM:
2416 Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
2417 if (!VM_IS_RAW_MODE_ENABLED(pVM))
2418 {
2419 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
2420 enmOldState, EMSTATE_IEM_THEN_REM));
2421 if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
2422 {
2423 pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
2424 pVCpu->em.s.cIemThenRemInstructions = 0;
2425 }
2426 }
2427 else
2428 {
2429 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
2430 pVCpu->em.s.enmState = EMSTATE_REM;
2431 }
2432 break;
2433
2434 /*
2435 * Resume.
2436 */
2437 case VINF_EM_RESUME:
2438 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
2439 /* Don't reschedule in the halted or wait for SIPI case. */
2440 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
2441 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
2442 {
2443 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2444 break;
2445 }
2446 /* fall through and get scheduled. */
2447 RT_FALL_THRU();
2448
2449 /*
2450 * Reschedule.
2451 */
2452 case VINF_EM_RESCHEDULE:
2453 {
2454 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2455 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2456 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2457 pVCpu->em.s.cIemThenRemInstructions = 0;
2458 pVCpu->em.s.enmState = enmState;
2459 break;
2460 }
2461
2462 /*
2463 * Halted.
2464 */
2465 case VINF_EM_HALT:
2466 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
2467 pVCpu->em.s.enmState = EMSTATE_HALTED;
2468 break;
2469
2470 /*
2471 * Switch to the wait for SIPI state (application processor only)
2472 */
2473 case VINF_EM_WAIT_SIPI:
2474 Assert(pVCpu->idCpu != 0);
2475 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
2476 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2477 break;
2478
2479
2480 /*
2481 * Suspend.
2482 */
2483 case VINF_EM_SUSPEND:
2484 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2485 Assert(enmOldState != EMSTATE_SUSPENDED);
2486 pVCpu->em.s.enmPrevState = enmOldState;
2487 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2488 break;
2489
2490 /*
2491 * Reset.
 2492 * We might end up doing a double reset for now; we'll have to clean up the mess later.
2493 */
2494 case VINF_EM_RESET:
2495 {
2496 if (pVCpu->idCpu == 0)
2497 {
2498 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
2499 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
2500 if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
2501 pVCpu->em.s.cIemThenRemInstructions = 0;
2502 pVCpu->em.s.enmState = enmState;
2503 }
2504 else
2505 {
2506 /* All other VCPUs go into the wait for SIPI state. */
2507 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
2508 }
2509 break;
2510 }
2511
2512 /*
2513 * Power Off.
2514 */
2515 case VINF_EM_OFF:
2516 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2517 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2518 TMR3NotifySuspend(pVM, pVCpu);
2519 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2520 return rc;
2521
2522 /*
2523 * Terminate the VM.
2524 */
2525 case VINF_EM_TERMINATE:
2526 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
2527 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
2528 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
2529 TMR3NotifySuspend(pVM, pVCpu);
2530 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2531 return rc;
2532
2533
2534 /*
2535 * Out of memory, suspend the VM and stuff.
2536 */
2537 case VINF_EM_NO_MEMORY:
2538 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
2539 Assert(enmOldState != EMSTATE_SUSPENDED);
2540 pVCpu->em.s.enmPrevState = enmOldState;
2541 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2542 TMR3NotifySuspend(pVM, pVCpu);
2543 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2544
2545 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
2546 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
2547 if (rc != VINF_EM_SUSPEND)
2548 {
2549 if (RT_SUCCESS_NP(rc))
2550 {
2551 AssertLogRelMsgFailed(("%Rrc\n", rc));
2552 rc = VERR_EM_INTERNAL_ERROR;
2553 }
2554 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2555 }
2556 return rc;
2557
2558 /*
2559 * Guest debug events.
2560 */
2561 case VINF_EM_DBG_STEPPED:
2562 case VINF_EM_DBG_STOP:
2563 case VINF_EM_DBG_EVENT:
2564 case VINF_EM_DBG_BREAKPOINT:
2565 case VINF_EM_DBG_STEP:
2566 if (enmOldState == EMSTATE_RAW)
2567 {
2568 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
2569 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
2570 }
2571 else if (enmOldState == EMSTATE_HM)
2572 {
2573 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
2574 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
2575 }
2576 else if (enmOldState == EMSTATE_NEM)
2577 {
2578 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
2579 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
2580 }
2581 else if (enmOldState == EMSTATE_REM)
2582 {
2583 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
2584 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
2585 }
2586 else
2587 {
2588 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
2589 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
2590 }
2591 break;
2592
2593 /*
2594 * Hypervisor debug events.
2595 */
2596 case VINF_EM_DBG_HYPER_STEPPED:
2597 case VINF_EM_DBG_HYPER_BREAKPOINT:
2598 case VINF_EM_DBG_HYPER_ASSERTION:
2599 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
2600 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
2601 break;
2602
2603 /*
2604 * Triple fault.
2605 */
2606 case VINF_EM_TRIPLE_FAULT:
2607 if (!pVM->em.s.fGuruOnTripleFault)
2608 {
2609 Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
2610 rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
2611 Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
2612 continue;
2613 }
2614 /* Else fall through and trigger a guru. */
2615 RT_FALL_THRU();
2616
2617 case VERR_VMM_RING0_ASSERTION:
2618 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2619 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2620 break;
2621
2622 /*
2623 * Any error code showing up here other than the ones we
2624 * know and process above are considered to be FATAL.
2625 *
2626 * Unknown warnings and informational status codes are also
2627 * included in this.
2628 */
2629 default:
2630 if (RT_SUCCESS_NP(rc))
2631 {
2632 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
2633 rc = VERR_EM_INTERNAL_ERROR;
2634 }
2635 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
2636 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2637 break;
2638 }
2639
2640 /*
2641 * Act on state transition.
2642 */
2643 EMSTATE const enmNewState = pVCpu->em.s.enmState;
2644 if (enmOldState != enmNewState)
2645 {
2646 VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);
2647
2648 /* Clear MWait flags and the unhalt FF. */
2649 if ( enmOldState == EMSTATE_HALTED
2650 && ( (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2651 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2652 && ( enmNewState == EMSTATE_RAW
2653 || enmNewState == EMSTATE_HM
2654 || enmNewState == EMSTATE_NEM
2655 || enmNewState == EMSTATE_REM
2656 || enmNewState == EMSTATE_IEM_THEN_REM
2657 || enmNewState == EMSTATE_DEBUG_GUEST_RAW
2658 || enmNewState == EMSTATE_DEBUG_GUEST_HM
2659 || enmNewState == EMSTATE_DEBUG_GUEST_NEM
2660 || enmNewState == EMSTATE_DEBUG_GUEST_IEM
2661 || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
2662 {
2663 if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
2664 {
2665 LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
2666 pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2667 }
2668 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
2669 {
2670 LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
2671 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
2672 }
2673 }
2674 }
2675 else
2676 VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);
2677
2678 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
2679 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2680
2681 /*
2682 * Act on the new state.
2683 */
2684 switch (enmNewState)
2685 {
2686 /*
2687 * Execute raw.
2688 */
2689 case EMSTATE_RAW:
2690#ifdef VBOX_WITH_RAW_MODE
2691 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
2692#else
2693 AssertLogRelMsgFailed(("%Rrc\n", rc));
2694 rc = VERR_EM_INTERNAL_ERROR;
2695#endif
2696 break;
2697
2698 /*
2699 * Execute hardware accelerated raw.
2700 */
2701 case EMSTATE_HM:
2702 rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
2703 break;
2704
2705 /*
 2706             * Execute using the native execution manager (NEM).
2707 */
2708 case EMSTATE_NEM:
2709 rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
2710 break;
2711
2712 /*
2713 * Execute recompiled.
2714 */
2715 case EMSTATE_REM:
2716 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2717 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2718 break;
2719
2720 /*
2721 * Execute in the interpreter.
2722 */
2723 case EMSTATE_IEM:
2724 {
2725#if 0 /* For testing purposes. */
2726 STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
2727 rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
2728 STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
2729 if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
2730 rc = VINF_SUCCESS;
2731 else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
2732#endif
2733 rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
2734 if (pVM->em.s.fIemExecutesAll)
2735 {
2736 Assert(rc != VINF_EM_RESCHEDULE_REM);
2737 Assert(rc != VINF_EM_RESCHEDULE_RAW);
2738 Assert(rc != VINF_EM_RESCHEDULE_HM);
2739 }
2740 fFFDone = false;
2741 break;
2742 }
2743
2744 /*
 2745             * Execute in IEM, hoping we can quickly switch back to HM
2746 * or RAW execution. If our hopes fail, we go to REM.
2747 */
2748 case EMSTATE_IEM_THEN_REM:
2749 {
2750 STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2751 rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
2752 STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
2753 break;
2754 }
2755
2756 /*
2757 * Application processor execution halted until SIPI.
2758 */
2759 case EMSTATE_WAIT_SIPI:
2760 /* no break */
2761 /*
2762 * hlt - execution halted until interrupt.
2763 */
2764 case EMSTATE_HALTED:
2765 {
2766 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
 2767                    /* If HM (or someone else) stores a pending interrupt in
 2768                       TRPM, it must be dispatched ASAP without any halting.
 2769                       Anything pending in TRPM has been accepted and the CPU
 2770                       should already be in the right state to receive it. */
2771 if (TRPMHasTrap(pVCpu))
2772 rc = VINF_EM_RESCHEDULE;
2773 /* MWAIT has a special extension where it's woken up when
2774 an interrupt is pending even when IF=0. */
2775 else if ( (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2776 == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
2777 {
2778 rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
2779 if (rc == VINF_SUCCESS)
2780 {
2781 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2782 APICUpdatePendingInterrupts(pVCpu);
2783
2784 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
2785 | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2786 {
2787 Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
2788 rc = VINF_EM_RESCHEDULE;
2789 }
2790 }
2791 }
2792 else
2793 {
2794 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2795 /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
2796 check VMCPU_FF_UPDATE_APIC here. */
2797 if ( rc == VINF_SUCCESS
2798 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
2799 {
2800 Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
2801 rc = VINF_EM_RESCHEDULE;
2802 }
2803 }
2804
2805 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2806 break;
2807 }
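
                /* Hedged sketch (disabled, not part of the original source):
                   the MWAIT wake-up test above checks that both
                   EMMWAIT_FLAG_ACTIVE and EMMWAIT_FLAG_BREAKIRQIF0 are set,
                   i.e. the guest armed MWAIT with the break-on-interrupt-
                   even-if-IF=0 extension.  Hoisted to file scope it could be
                   expressed as the hypothetical predicate below. */
#if 0
DECLINLINE(bool) emR3IsMWaitIrqBreakArmed(PVMCPU pVCpu)
{
    return (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
        ==                            (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
}
#endif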

                /*
                 * Suspended - return to VM.cpp.
                 */
                case EMSTATE_SUSPENDED:
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return VINF_EM_SUSPEND;

                /*
                 * Debugging in the guest.
                 */
                case EMSTATE_DEBUG_GUEST_RAW:
                case EMSTATE_DEBUG_GUEST_HM:
                case EMSTATE_DEBUG_GUEST_NEM:
                case EMSTATE_DEBUG_GUEST_IEM:
                case EMSTATE_DEBUG_GUEST_REM:
                    TMR3NotifySuspend(pVM, pVCpu);
                    rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
                    TMR3NotifyResume(pVM, pVCpu);
                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
                    break;

                /*
                 * Debugging in the hypervisor.
                 */
                case EMSTATE_DEBUG_HYPER:
                {
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);

                    rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
                    if (rc != VINF_SUCCESS)
                    {
                        if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
                            pVCpu->em.s.enmState = EMSTATE_TERMINATING;
                        else
                        {
                            /* switch to guru meditation mode */
                            pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
                            VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
                            VMMR3FatalDump(pVM, pVCpu, rc);
                        }
                        Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                        return rc;
                    }

                    STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
                    TMR3NotifyResume(pVM, pVCpu);
                    break;
                }

                /*
                 * Guru meditation takes place in the debugger.
                 */
                case EMSTATE_GURU_MEDITATION:
                {
                    TMR3NotifySuspend(pVM, pVCpu);
                    VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
                    VMMR3FatalDump(pVM, pVCpu, rc);
                    emR3Debug(pVM, pVCpu, rc);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return rc;
                }
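
                /* Hedged sketch (disabled, not part of the original source):
                   frontends normally learn about the guru meditation entered
                   above through the VM state callbacks rather than by polling
                   EM.  The callback below is illustrative only; it would live
                   at file scope and be registered with VMR3AtStateRegister(). */
#if 0
static DECLCALLBACK(void) emR3DemoAtState(PUVM pUVM, VMSTATE enmState, VMSTATE enmOldState, void *pvUser)
{
    if (enmState == VMSTATE_GURU_MEDITATION)
        LogRel(("EM demo: guru meditation entered (previous state %d)\n", enmOldState));
    RT_NOREF(pUVM, pvUser);
}
#endif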

                /*
                 * The states we don't expect here.
                 */
                case EMSTATE_NONE:
                case EMSTATE_TERMINATING:
                default:
                    AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
                    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return VERR_EM_INTERNAL_ERROR;
            }
        } /* The Outer Main Loop */
    }
    else
    {
        /*
         * Fatal error.
         */
        Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
        TMR3NotifySuspend(pVM, pVCpu);
        VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
        VMMR3FatalDump(pVM, pVCpu, rc);
        emR3Debug(pVM, pVCpu, rc);
        STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
        /** @todo change the VM state! */
        return rc;
    }

    /* not reached */
}

/**
 * Notify EM that the VM is being suspended; stops the virtual clock and parks
 * EM in EMSTATE_SUSPENDED (used by FTM).
 *
 * @returns VINF_SUCCESS.
 * @param   pVM         The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);

    TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
    pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
    pVCpu->em.s.enmState     = EMSTATE_SUSPENDED;
    return VINF_SUCCESS;
}

/**
 * Notify EM that the VM is being resumed; restarts the virtual clock and
 * restores the EM state saved by EMR3NotifySuspend (used by FTM).
 *
 * @returns VINF_SUCCESS.
 * @param   pVM         The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
{
    PVMCPU pVCpu = VMMGetCpu(pVM);
    EMSTATE enmCurState = pVCpu->em.s.enmState;

    TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
    pVCpu->em.s.enmState     = pVCpu->em.s.enmPrevState;
    pVCpu->em.s.enmPrevState = enmCurState;
    return VINF_SUCCESS;
}
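
/* Hedged usage sketch (disabled, not part of the original source): FTM-style
   callers are expected to bracket their work with the two notifications above,
   so that the virtual clock is stopped while EM sits in EMSTATE_SUSPENDED.
   ftmR3ExampleSync is a hypothetical caller, shown only to illustrate the
   pairing. */
#if 0
static int ftmR3ExampleSync(PVM pVM)
{
    int rc = EMR3NotifySuspend(pVM);   /* Parks EM and stops the virtual time. */
    AssertRCReturn(rc, rc);
    /* ... synchronize VM state with the standby host here ... */
    return EMR3NotifyResume(pVM);      /* Restores the previous EM state. */
}
#endif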