VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EM.cpp@ 72606

Last change on this file since 72606 was 72606, checked in by vboxsync, 7 years ago

HMVMXR0.cpp,EM: Code for optimizing I/O port, MMIO and CPUID exits (currently disabled by default). bugref:9198

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 143.0 KB
Line 
1/* $Id: EM.cpp 72606 2018-06-18 19:03:15Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HmExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
33
34
35/*********************************************************************************************************************************
36* Header Files *
37*********************************************************************************************************************************/
38#define LOG_GROUP LOG_GROUP_EM
39#define VMCPU_INCL_CPUM_GST_CTX /* for CPUM_IMPORT_GUEST_STATE_RET */
40#include <VBox/vmm/em.h>
41#include <VBox/vmm/vmm.h>
42#include <VBox/vmm/patm.h>
43#include <VBox/vmm/csam.h>
44#include <VBox/vmm/selm.h>
45#include <VBox/vmm/trpm.h>
46#include <VBox/vmm/iem.h>
47#include <VBox/vmm/nem.h>
48#include <VBox/vmm/iom.h>
49#include <VBox/vmm/dbgf.h>
50#include <VBox/vmm/pgm.h>
51#ifdef VBOX_WITH_REM
52# include <VBox/vmm/rem.h>
53#endif
54#include <VBox/vmm/apic.h>
55#include <VBox/vmm/tm.h>
56#include <VBox/vmm/mm.h>
57#include <VBox/vmm/ssm.h>
58#include <VBox/vmm/pdmapi.h>
59#include <VBox/vmm/pdmcritsect.h>
60#include <VBox/vmm/pdmqueue.h>
61#include <VBox/vmm/hm.h>
62#include <VBox/vmm/patm.h>
63#include "EMInternal.h"
64#include <VBox/vmm/vm.h>
65#include <VBox/vmm/uvm.h>
66#include <VBox/vmm/cpumdis.h>
67#include <VBox/dis.h>
68#include <VBox/disopcode.h>
69#include "VMMTracing.h"
70
71#include <iprt/asm.h>
72#include <iprt/string.h>
73#include <iprt/stream.h>
74#include <iprt/thread.h>
75
76
77/*********************************************************************************************************************************
78* Defined Constants And Macros *
79*********************************************************************************************************************************/
80#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
81#define EM_NOTIFY_HM
82#endif
83
84
85/*********************************************************************************************************************************
86* Internal Functions *
87*********************************************************************************************************************************/
 88/* Saved-state callbacks; registered with SSM via SSMR3RegisterInternal() in EMR3Init(). */
 89static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
 90static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
 91#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
 92/* Maps an EMSTATE value to a human-readable name; only needed in logging/strict builds. */
 93static const char *emR3GetStateName(EMSTATE enmState);
 94#endif
 95/* NOTE(review): presumably handles debug-related status codes returned by the
 96   execution loops -- body not in view here, confirm against the definition. */
 97static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc);
 98#if defined(VBOX_WITH_REM) || defined(DEBUG)
 99/* Single-step helper for the recompiler path (REM and/or debug builds only). */
100static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
101#endif
102/* Inner execution loop for recompiled execution (see the @page comment above);
103   *pfFFDone reports to the caller whether forced actions were handled -- NOTE(review):
104   exact out-parameter semantics inferred from the name, verify at the definition. */
105static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
98
99
100/**
101 * Initializes the EM.
102 *
103 * @returns VBox status code.
104 * @param pVM The cross context VM structure.
105 */
106VMMR3_INT_DECL(int) EMR3Init(PVM pVM)
107{
108 LogFlow(("EMR3Init\n"));
109 /*
110 * Assert alignment and sizes.
111 */
112 AssertCompileMemberAlignment(VM, em.s, 32);
113 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
114 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
115
116 /*
117 * Init the structure.
118 */
119 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
120 PCFGMNODE pCfgRoot = CFGMR3GetRoot(pVM);
121 PCFGMNODE pCfgEM = CFGMR3GetChild(pCfgRoot, "EM");
122
123 bool fEnabled;
124 int rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR3Enabled", &fEnabled, true);
125 AssertLogRelRCReturn(rc, rc);
126 pVM->fRecompileUser = !fEnabled;
127
128 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR0Enabled", &fEnabled, true);
129 AssertLogRelRCReturn(rc, rc);
130 pVM->fRecompileSupervisor = !fEnabled;
131
132#ifdef VBOX_WITH_RAW_RING1
133 rc = CFGMR3QueryBoolDef(pCfgRoot, "RawR1Enabled", &pVM->fRawRing1Enabled, false);
134 AssertLogRelRCReturn(rc, rc);
135#else
136 pVM->fRawRing1Enabled = false; /* Disabled by default. */
137#endif
138
139 rc = CFGMR3QueryBoolDef(pCfgEM, "IemExecutesAll", &pVM->em.s.fIemExecutesAll, false);
140 AssertLogRelRCReturn(rc, rc);
141
142 rc = CFGMR3QueryBoolDef(pCfgEM, "TripleFaultReset", &fEnabled, false);
143 AssertLogRelRCReturn(rc, rc);
144 pVM->em.s.fGuruOnTripleFault = !fEnabled;
145 if (!pVM->em.s.fGuruOnTripleFault && pVM->cCpus > 1)
146 {
147 LogRel(("EM: Overriding /EM/TripleFaultReset, must be false on SMP.\n"));
148 pVM->em.s.fGuruOnTripleFault = true;
149 }
150
151 /**
152 * @cfgm{/EM/ExitOptimizationEnabled, bool, true for NEM otherwise false}
153 * Whether to try correlate exit history, detect hot spots and try optimize
154 * these using IEM if there are other exits close by.
155 * @todo enable for HM too.
156 */
157 bool fExitOptimizationEnabled = true;
158 rc = CFGMR3QueryBoolDef(pCfgEM, "ExitOptimizationEnabled", &fExitOptimizationEnabled, VM_IS_NEM_ENABLED(pVM));
159 AssertLogRelRCReturn(rc, rc);
160
161 for (VMCPUID i = 0; i < pVM->cCpus; i++)
162 pVM->aCpus[i].em.s.fExitOptimizationEnabled = fExitOptimizationEnabled;
163
164 LogRel(("EMR3Init: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fRawRing1Enabled=%RTbool fIemExecutesAll=%RTbool fGuruOnTripleFault=%RTbool fExitOptimizationEnabled=%RTbool\n",
165 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->fRawRing1Enabled, pVM->em.s.fIemExecutesAll, pVM->em.s.fGuruOnTripleFault, fExitOptimizationEnabled));
166
167#ifdef VBOX_WITH_REM
168 /*
169 * Initialize the REM critical section.
170 */
171 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
172 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
173 AssertRCReturn(rc, rc);
174#endif
175
176 /*
177 * Saved state.
178 */
179 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
180 NULL, NULL, NULL,
181 NULL, emR3Save, NULL,
182 NULL, emR3Load, NULL);
183 if (RT_FAILURE(rc))
184 return rc;
185
186 for (VMCPUID i = 0; i < pVM->cCpus; i++)
187 {
188 PVMCPU pVCpu = &pVM->aCpus[i];
189
190 pVCpu->em.s.enmState = i == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
191 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
192 pVCpu->em.s.fForceRAW = false;
193 pVCpu->em.s.u64TimeSliceStart = 0; /* paranoia */
194 pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
195
196 pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
197#ifdef VBOX_WITH_RAW_MODE
198 if (VM_IS_RAW_MODE_ENABLED(pVM))
199 {
200 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
201 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
202 }
203#endif
204
205# define EM_REG_COUNTER(a, b, c) \
206 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
207 AssertRC(rc);
208
209# define EM_REG_COUNTER_USED(a, b, c) \
210 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
211 AssertRC(rc);
212
213# define EM_REG_PROFILE(a, b, c) \
214 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
215 AssertRC(rc);
216
217# define EM_REG_PROFILE_ADV(a, b, c) \
218 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
219 AssertRC(rc);
220
221 /*
222 * Statistics.
223 */
224#ifdef VBOX_WITH_STATISTICS
225 PEMSTATS pStats;
226 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
227 if (RT_FAILURE(rc))
228 return rc;
229
230 pVCpu->em.s.pStatsR3 = pStats;
231 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
232 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
233
234 EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
235 EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");
236
237 EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
238 EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");
239
240 EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
241 EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
242 EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
243 EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
244 EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
245 EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
246 EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
247 EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
248 EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
249 EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
250 EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
251 EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
252 EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
253 EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
254 EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
255 EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
256 EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
257 EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
258 EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
259 EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
260 EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
261 EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
262 EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
263 EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
264 EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
265 EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
266 EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
267 EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
268 EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
269 EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
270 EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
271 EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
272 EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
273 EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
274 EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
275 EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
276 EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
277 EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
278 EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
279 EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
280 EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
281 EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
282 EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
283 EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
284 EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
285 EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
286 EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
287 EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
288 EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
289 EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
290 EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
291 EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
292 EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
293 EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
294 EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
295 EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
296 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
297 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
298 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
299 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
300 EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
301 EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
302 EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
303 EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
304 EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
305 EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
306 EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
307 EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
308 EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
309 EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
310 EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
311 EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
312 EM_REG_COUNTER_USED(&pStats->StatRZSmsw, "/EM/CPU%d/RZ/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
313 EM_REG_COUNTER_USED(&pStats->StatR3Smsw, "/EM/CPU%d/R3/Interpret/Success/Smsw", "The number of times SMSW was successfully interpreted.");
314
315 EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
316 EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");
317
318 EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
319 EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
320 EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
321 EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
322 EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
323 EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
324 EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
325 EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
326 EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
327 EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
328 EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
329 EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
330 EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
331 EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
332 EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
333 EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
334 EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
335 EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
336 EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
337 EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
338 EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
339 EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
340 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
341 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
342 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
343 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
344 EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
345 EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
346 EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
347 EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
348 EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
349 EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
350 EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
351 EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
352 EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
353 EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
354 EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
355 EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
356 EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
357 EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
358 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
359 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
360 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
361 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
362 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
363 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
364 EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
365 EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
366 EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
367 EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
368 EM_REG_COUNTER_USED(&pStats->StatRZFailedSmsw, "/EM/CPU%d/RZ/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
369 EM_REG_COUNTER_USED(&pStats->StatR3FailedSmsw, "/EM/CPU%d/R3/Interpret/Failed/Smsw", "The number of times SMSW was not interpreted.");
370
371 EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
372 EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
373 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
374 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
375 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
376 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
377 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
378 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
379 EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
380 EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
381 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
382 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
383 EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
384 EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
385 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
386 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
387 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
388 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
389 EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
390 EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
391 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
392 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
393 EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
394 EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
395 EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
396 EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
397 EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
398 EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
399
400 EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
401 EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
402 EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of prefix .");
403 EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of prefix .");
404
405 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "I/O instructions restarted in ring-3.");
406 EM_REG_COUNTER_USED(&pStats->StatIoIem, "/EM/CPU%d/R3/PrivInst/IoIem", "I/O instructions end to IEM in ring-3.");
407 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
408 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sli instructions.");
409 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
410 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
411 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
412 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
413 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
414 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
415 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
416 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
417 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
418 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
419 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
420 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
421 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
422 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
423 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
424 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
425 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
426 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
427 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
428 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
429 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
430 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
431
432 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
433 pVCpu->em.s.pCliStatTree = 0;
434
435 /* these should be considered for release statistics. */
436 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
437 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
438 EM_REG_PROFILE(&pVCpu->em.s.StatHMEntry, "/PROF/CPU%d/EM/HMEnter", "Profiling Hardware Accelerated Mode entry overhead.");
439 EM_REG_PROFILE(&pVCpu->em.s.StatHMExec, "/PROF/CPU%d/EM/HMExec", "Profiling Hardware Accelerated Mode execution.");
440 EM_REG_COUNTER(&pVCpu->em.s.StatHMExecuteCalled, "/PROF/CPU%d/EM/HMExecuteCalled", "Number of times enmR3HMExecute is called.");
441 EM_REG_PROFILE(&pVCpu->em.s.StatIEMEmu, "/PROF/CPU%d/EM/IEMEmuSingle", "Profiling single instruction IEM execution.");
442 EM_REG_PROFILE(&pVCpu->em.s.StatIEMThenREM, "/PROF/CPU%d/EM/IEMThenRem", "Profiling IEM-then-REM instruction execution (by IEM).");
443 EM_REG_PROFILE(&pVCpu->em.s.StatNEMEntry, "/PROF/CPU%d/EM/NEMEnter", "Profiling NEM entry overhead.");
444#endif /* VBOX_WITH_STATISTICS */
445 EM_REG_PROFILE(&pVCpu->em.s.StatNEMExec, "/PROF/CPU%d/EM/NEMExec", "Profiling NEM execution.");
446 EM_REG_COUNTER(&pVCpu->em.s.StatNEMExecuteCalled, "/PROF/CPU%d/EM/NEMExecuteCalled", "Number of times enmR3NEMExecute is called.");
447#ifdef VBOX_WITH_STATISTICS
448 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
449 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
450 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
451 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
452 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
453 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
454#endif /* VBOX_WITH_STATISTICS */
455
456 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
457 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
458 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
459 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
460 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
461
462 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
463
464 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.iNextExit, STAMTYPE_U64, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
465 "Number of recorded exits.", "/PROF/CPU%u/EM/RecordedExits", i);
466 AssertRC(rc);
467
468 /* History record statistics */
469 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.cExitRecordUsed, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,
470 "Number of used hash table entries.", "/EM/CPU%u/ExitHashing/Used", i);
471 AssertRC(rc);
472
473 for (uint32_t iStep = 0; iStep < RT_ELEMENTS(pVCpu->em.s.aStatHistoryRecHits); iStep++)
474 {
475 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecHits[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
476 "Number of hits at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Hits", i, iStep);
477 AssertRC(rc);
478 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
479 "Number of type changes at this step.", "/EM/CPU%u/ExitHashing/Step%02u-TypeChanges", i, iStep);
480 AssertRC(rc);
481 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecTypeChanged[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
482 "Number of replacments at this step.", "/EM/CPU%u/ExitHashing/Step%02u-Replacments", i, iStep);
483 AssertRC(rc);
484 rc = STAMR3RegisterF(pVM, &pVCpu->em.s.aStatHistoryRecNew[iStep], STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
485 "Number of new inserts at this step.", "/EM/CPU%u/ExitHashing/Step%02u-NewInserts", i, iStep);
486 AssertRC(rc);
487 }
488
489 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryExec, "/EM/CPU%d/ExitOpt/Exec", "Profiling normal EMHistoryExec operation.");
490 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecSavedExits, "/EM/CPU%d/ExitOpt/ExecSavedExit", "Net number of saved exits.");
491 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryExecInstructions, "/EM/CPU%d/ExitOpt/ExecInstructions", "Number of instructions executed during normal operation.");
492 EM_REG_PROFILE(&pVCpu->em.s.StatHistoryProbe, "/EM/CPU%d/ExitOpt/Probe", "Profiling EMHistoryExec when probing.");
493 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbeInstructions, "/EM/CPU%d/ExitOpt/ProbeInstructions", "Number of instructions executed during probing.");
494 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedNormal, "/EM/CPU%d/ExitOpt/ProbedNormal", "Number of EMEXITACTION_NORMAL_PROBED results.");
495 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedExecWithMax, "/EM/CPU%d/ExitOpt/ProbedExecWithMax", "Number of EMEXITACTION_EXEC_WITH_MAX results.");
496 EM_REG_COUNTER(&pVCpu->em.s.StatHistoryProbedToRing3, "/EM/CPU%d/ExitOpt/ProbedToRing3", "Number of ring-3 probe continuations.");
497 }
498
499 emR3InitDbg(pVM);
500 return VINF_SUCCESS;
501}
502
503
504/**
505 * Applies relocations to data and code managed by this
506 * component. This function will be called at init and
507 * whenever the VMM need to relocate it self inside the GC.
508 *
509 * @param pVM The cross context VM structure.
510 */
511VMMR3_INT_DECL(void) EMR3Relocate(PVM pVM)
512{
513 LogFlow(("EMR3Relocate\n"));
514 for (VMCPUID i = 0; i < pVM->cCpus; i++)
515 {
516 PVMCPU pVCpu = &pVM->aCpus[i];
517 if (pVCpu->em.s.pStatsR3)
518 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
519 }
520}
521
522
523/**
524 * Reset the EM state for a CPU.
525 *
526 * Called by EMR3Reset and hot plugging.
527 *
528 * @param pVCpu The cross context virtual CPU structure.
529 */
530VMMR3_INT_DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
531{
532 /* Reset scheduling state. */
533 pVCpu->em.s.fForceRAW = false;
534 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
535
536 /* VMR3ResetFF may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
537 out of the HALTED state here so that enmPrevState doesn't end up as
538 HALTED when EMR3Execute returns. */
539 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
540 {
541 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
542 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
543 }
544}
545
546
547/**
548 * Reset notification.
549 *
550 * @param pVM The cross context VM structure.
551 */
552VMMR3_INT_DECL(void) EMR3Reset(PVM pVM)
553{
554 Log(("EMR3Reset: \n"));
555 for (VMCPUID i = 0; i < pVM->cCpus; i++)
556 EMR3ResetCpu(&pVM->aCpus[i]);
557}
558
559
/**
 * Terminates the EM.
 *
 * Termination means cleaning up and freeing all resources,
 * the VM it self is at this point powered off or suspended.
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 */
VMMR3_INT_DECL(int) EMR3Term(PVM pVM)
{
    /* offVM is non-zero once EM has been initialized; catch bad shutdown ordering. */
    AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));

#ifdef VBOX_WITH_REM
    /* Tear down the REM critical section. */
    PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
#else
    RT_NOREF(pVM);
#endif
    return VINF_SUCCESS;
}
580
581
582/**
583 * Execute state save operation.
584 *
585 * @returns VBox status code.
586 * @param pVM The cross context VM structure.
587 * @param pSSM SSM operation handle.
588 */
589static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
590{
591 for (VMCPUID i = 0; i < pVM->cCpus; i++)
592 {
593 PVMCPU pVCpu = &pVM->aCpus[i];
594
595 SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
596
597 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
598 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
599 SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
600
601 /* Save mwait state. */
602 SSMR3PutU32(pSSM, pVCpu->em.s.MWait.fWait);
603 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRAX);
604 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMWaitRCX);
605 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRAX);
606 SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRCX);
607 int rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.MWait.uMonitorRDX);
608 AssertRCReturn(rc, rc);
609 }
610 return VINF_SUCCESS;
611}
612
613
/**
 * Execute state load operation.
 *
 * Counterpart to emR3Save: reads the fields in the exact same order they
 * were written, gating newer fields on the saved-state version.
 *
 * @returns VBox status code.
 * @param pVM The cross context VM structure.
 * @param pSSM SSM operation handle.
 * @param uVersion Data layout version.
 * @param uPass The data pass.
 */
static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
{
    /*
     * Validate version.
     */
    if (   uVersion > EM_SAVED_STATE_VERSION
        || uVersion < EM_SAVED_STATE_VERSION_PRE_SMP)
    {
        AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
        return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
    }
    Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);

    /*
     * Load the saved state.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];

        /* Default fForceRAW before bailing out so it is never left undefined. */
        int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
        if (RT_FAILURE(rc))
            pVCpu->em.s.fForceRAW = false;
        AssertRCReturn(rc, rc);

        if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
        {
            /* enmPrevState is stored as a 32-bit value; make sure the enum still fits. */
            AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
            rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
            AssertRCReturn(rc, rc);
            Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);

            /* We land in the suspended state; enmPrevState tells us where to resume. */
            pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
        }
        if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
        {
            /* Load mwait state. */
            rc = SSMR3GetU32(pSSM, &pVCpu->em.s.MWait.fWait);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMWaitRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRAX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRCX);
            AssertRCReturn(rc, rc);
            rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.MWait.uMonitorRDX);
            AssertRCReturn(rc, rc);
        }

        Assert(!pVCpu->em.s.pCliStatTree);
    }
    return VINF_SUCCESS;
}
678
679
/**
 * Argument packet for emR3SetExecutionPolicy.
 */
struct EMR3SETEXECPOLICYARGS
{
    /** The execution scheduling policy being changed. */
    EMEXECPOLICY enmPolicy;
    /** Whether to enforce (true) or lift (false) the policy. */
    bool fEnforce;
};
688
689
690/**
691 * @callback_method_impl{FNVMMEMTRENDEZVOUS, Rendezvous callback for EMR3SetExecutionPolicy.}
692 */
693static DECLCALLBACK(VBOXSTRICTRC) emR3SetExecutionPolicy(PVM pVM, PVMCPU pVCpu, void *pvUser)
694{
695 /*
696 * Only the first CPU changes the variables.
697 */
698 if (pVCpu->idCpu == 0)
699 {
700 struct EMR3SETEXECPOLICYARGS *pArgs = (struct EMR3SETEXECPOLICYARGS *)pvUser;
701 switch (pArgs->enmPolicy)
702 {
703 case EMEXECPOLICY_RECOMPILE_RING0:
704 pVM->fRecompileSupervisor = pArgs->fEnforce;
705 break;
706 case EMEXECPOLICY_RECOMPILE_RING3:
707 pVM->fRecompileUser = pArgs->fEnforce;
708 break;
709 case EMEXECPOLICY_IEM_ALL:
710 pVM->em.s.fIemExecutesAll = pArgs->fEnforce;
711 break;
712 default:
713 AssertFailedReturn(VERR_INVALID_PARAMETER);
714 }
715 LogRel(("emR3SetExecutionPolicy: fRecompileUser=%RTbool fRecompileSupervisor=%RTbool fIemExecutesAll=%RTbool\n",
716 pVM->fRecompileUser, pVM->fRecompileSupervisor, pVM->em.s.fIemExecutesAll));
717 }
718
719 /*
720 * Force rescheduling if in RAW, HM, NEM, IEM, or REM.
721 */
722 return pVCpu->em.s.enmState == EMSTATE_RAW
723 || pVCpu->em.s.enmState == EMSTATE_HM
724 || pVCpu->em.s.enmState == EMSTATE_NEM
725 || pVCpu->em.s.enmState == EMSTATE_IEM
726 || pVCpu->em.s.enmState == EMSTATE_REM
727 || pVCpu->em.s.enmState == EMSTATE_IEM_THEN_REM
728 ? VINF_EM_RESCHEDULE
729 : VINF_SUCCESS;
730}
731
732
/**
 * Changes an execution scheduling policy parameter.
 *
 * This is used to enable or disable raw-mode / hardware-virtualization
 * execution of user and supervisor code.
 *
 * @returns VINF_SUCCESS on success.
 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
 * @returns VERR_INVALID_PARAMETER on an invalid enmPolicy value.
 *
 * @param pUVM The user mode VM handle.
 * @param enmPolicy The scheduling policy to change.
 * @param fEnforce Whether to enforce the policy or not.
 */
VMMR3DECL(int) EMR3SetExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool fEnforce)
{
    UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
    VM_ASSERT_VALID_EXT_RETURN(pUVM->pVM, VERR_INVALID_VM_HANDLE);
    AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);

    /* Apply the change on all EMTs via a descending rendezvous; see
       emR3SetExecutionPolicy for the per-CPU handling. */
    struct EMR3SETEXECPOLICYARGS Args = { enmPolicy, fEnforce };
    return VMMR3EmtRendezvous(pUVM->pVM, VMMEMTRENDEZVOUS_FLAGS_TYPE_DESCENDING, emR3SetExecutionPolicy, &Args);
}
756
757
758/**
759 * Queries an execution scheduling policy parameter.
760 *
761 * @returns VBox status code
762 * @param pUVM The user mode VM handle.
763 * @param enmPolicy The scheduling policy to query.
764 * @param pfEnforced Where to return the current value.
765 */
766VMMR3DECL(int) EMR3QueryExecutionPolicy(PUVM pUVM, EMEXECPOLICY enmPolicy, bool *pfEnforced)
767{
768 AssertReturn(enmPolicy > EMEXECPOLICY_INVALID && enmPolicy < EMEXECPOLICY_END, VERR_INVALID_PARAMETER);
769 AssertPtrReturn(pfEnforced, VERR_INVALID_POINTER);
770 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
771 PVM pVM = pUVM->pVM;
772 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
773
774 /* No need to bother EMTs with a query. */
775 switch (enmPolicy)
776 {
777 case EMEXECPOLICY_RECOMPILE_RING0:
778 *pfEnforced = pVM->fRecompileSupervisor;
779 break;
780 case EMEXECPOLICY_RECOMPILE_RING3:
781 *pfEnforced = pVM->fRecompileUser;
782 break;
783 case EMEXECPOLICY_IEM_ALL:
784 *pfEnforced = pVM->em.s.fIemExecutesAll;
785 break;
786 default:
787 AssertFailedReturn(VERR_INTERNAL_ERROR_2);
788 }
789
790 return VINF_SUCCESS;
791}
792
793
794/**
795 * Queries the main execution engine of the VM.
796 *
797 * @returns VBox status code
798 * @param pUVM The user mode VM handle.
799 * @param pbMainExecutionEngine Where to return the result, VM_EXEC_ENGINE_XXX.
800 */
801VMMR3DECL(int) EMR3QueryMainExecutionEngine(PUVM pUVM, uint8_t *pbMainExecutionEngine)
802{
803 AssertPtrReturn(pbMainExecutionEngine, VERR_INVALID_POINTER);
804 *pbMainExecutionEngine = VM_EXEC_ENGINE_NOT_SET;
805
806 UVM_ASSERT_VALID_EXT_RETURN(pUVM, VERR_INVALID_VM_HANDLE);
807 PVM pVM = pUVM->pVM;
808 VM_ASSERT_VALID_EXT_RETURN(pVM, VERR_INVALID_VM_HANDLE);
809
810 *pbMainExecutionEngine = pVM->bMainExecutionEngine;
811 return VINF_SUCCESS;
812}
813
814
/**
 * Raise a fatal error.
 *
 * Safely terminate the VM with full state report and stuff. This function
 * will naturally never return.
 *
 * @param pVCpu The cross context virtual CPU structure.
 * @param rc VBox status code.
 */
VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
{
    /* Enter guru meditation and long-jump out with rc; the matching setjmp on
       FatalLongJump is established elsewhere (presumably in the EM execution
       loop - confirm). This call does not return. */
    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
    longjmp(pVCpu->em.s.u.FatalLongJump, rc);
}
829
830
#if defined(LOG_ENABLED) || defined(VBOX_STRICT)
/**
 * Gets the EM state name.
 *
 * @returns pointer to read only state name,
 * @param enmState The state.
 */
static const char *emR3GetStateName(EMSTATE enmState)
{
    /* Keep this in sync with the EMSTATE enum; unknown values fall through to
       the "Unknown!" string rather than asserting. */
    switch (enmState)
    {
        case EMSTATE_NONE: return "EMSTATE_NONE";
        case EMSTATE_RAW: return "EMSTATE_RAW";
        case EMSTATE_HM: return "EMSTATE_HM";
        case EMSTATE_IEM: return "EMSTATE_IEM";
        case EMSTATE_REM: return "EMSTATE_REM";
        case EMSTATE_HALTED: return "EMSTATE_HALTED";
        case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
        case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
        case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
        case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
        case EMSTATE_DEBUG_GUEST_HM: return "EMSTATE_DEBUG_GUEST_HM";
        case EMSTATE_DEBUG_GUEST_IEM: return "EMSTATE_DEBUG_GUEST_IEM";
        case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
        case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
        case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
        case EMSTATE_IEM_THEN_REM: return "EMSTATE_IEM_THEN_REM";
        case EMSTATE_NEM: return "EMSTATE_NEM";
        case EMSTATE_DEBUG_GUEST_NEM: return "EMSTATE_DEBUG_GUEST_NEM";
        default: return "Unknown!";
    }
}
#endif /* LOG_ENABLED || VBOX_STRICT */
864
865
866/**
867 * Handle pending ring-3 I/O port write.
868 *
869 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
870 * by EMRZSetPendingIoPortWrite() in ring-0 or raw-mode context.
871 *
872 * @returns Strict VBox status code.
873 * @param pVM The cross context VM structure.
874 * @param pVCpu The cross context virtual CPU structure.
875 */
876VBOXSTRICTRC emR3ExecutePendingIoPortWrite(PVM pVM, PVMCPU pVCpu)
877{
878 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS);
879
880 /* Get and clear the pending data. */
881 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
882 uint32_t const uValue = pVCpu->em.s.PendingIoPortAccess.uValue;
883 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
884 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
885 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
886
887 /* Assert sanity. */
888 switch (cbValue)
889 {
890 case 1: Assert(!(cbValue & UINT32_C(0xffffff00))); break;
891 case 2: Assert(!(cbValue & UINT32_C(0xffff0000))); break;
892 case 4: break;
893 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
894 }
895 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
896
897 /* Do the work.*/
898 VBOXSTRICTRC rcStrict = IOMIOPortWrite(pVM, pVCpu, uPort, uValue, cbValue);
899 LogFlow(("EM/OUT: %#x, %#x LB %u -> %Rrc\n", uPort, uValue, cbValue, VBOXSTRICTRC_VAL(rcStrict) ));
900 if (IOM_SUCCESS(rcStrict))
901 {
902 pVCpu->cpum.GstCtx.rip += cbInstr;
903 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
904 }
905 return rcStrict;
906}
907
908
909/**
910 * Handle pending ring-3 I/O port write.
911 *
912 * This is in response to a VINF_EM_PENDING_R3_IOPORT_WRITE status code returned
913 * by EMRZSetPendingIoPortRead() in ring-0 or raw-mode context.
914 *
915 * @returns Strict VBox status code.
916 * @param pVM The cross context VM structure.
917 * @param pVCpu The cross context virtual CPU structure.
918 */
919VBOXSTRICTRC emR3ExecutePendingIoPortRead(PVM pVM, PVMCPU pVCpu)
920{
921 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP | CPUMCTX_EXTRN_RFLAGS | CPUMCTX_EXTRN_RAX);
922
923 /* Get and clear the pending data. */
924 RTIOPORT const uPort = pVCpu->em.s.PendingIoPortAccess.uPort;
925 uint8_t const cbValue = pVCpu->em.s.PendingIoPortAccess.cbValue;
926 uint8_t const cbInstr = pVCpu->em.s.PendingIoPortAccess.cbInstr;
927 pVCpu->em.s.PendingIoPortAccess.cbValue = 0;
928
929 /* Assert sanity. */
930 switch (cbValue)
931 {
932 case 1: break;
933 case 2: break;
934 case 4: break;
935 default: AssertMsgFailedReturn(("cbValue=%#x\n", cbValue), VERR_EM_INTERNAL_ERROR);
936 }
937 AssertReturn(pVCpu->em.s.PendingIoPortAccess.uValue == UINT32_C(0x52454144) /* READ*/, VERR_EM_INTERNAL_ERROR);
938 AssertReturn(cbInstr <= 15 && cbInstr >= 1, VERR_EM_INTERNAL_ERROR);
939
940 /* Do the work.*/
941 uint32_t uValue = 0;
942 VBOXSTRICTRC rcStrict = IOMIOPortRead(pVM, pVCpu, uPort, &uValue, cbValue);
943 LogFlow(("EM/IN: %#x LB %u -> %Rrc, %#x\n", uPort, cbValue, VBOXSTRICTRC_VAL(rcStrict), uValue ));
944 if (IOM_SUCCESS(rcStrict))
945 {
946 if (cbValue == 4)
947 pVCpu->cpum.GstCtx.rax = uValue;
948 else if (cbValue == 2)
949 pVCpu->cpum.GstCtx.ax = (uint16_t)uValue;
950 else
951 pVCpu->cpum.GstCtx.al = (uint8_t)uValue;
952 pVCpu->cpum.GstCtx.rip += cbInstr;
953 pVCpu->cpum.GstCtx.rflags.Bits.u1RF = 0;
954 }
955 return rcStrict;
956}
957
958
/**
 * Debug loop.
 *
 * Processes debug-related status codes in a loop: performs requested single
 * steps, forwards events (stepped, breakpoints, assertions, fatal errors) to
 * DBGF, and then acts on DBGF's answer - keep looping, resume execution, or
 * pass a terminating status back to the caller.
 *
 * @returns VBox status code for EM.
 * @param pVM The cross context VM structure.
 * @param pVCpu The cross context virtual CPU structure.
 * @param rc Current EM VBox status code.
 */
static VBOXSTRICTRC emR3Debug(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
{
    for (;;)
    {
        Log(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
        /* Remember the incoming code so it can be returned when no debugger is attached. */
        const VBOXSTRICTRC rcLast = rc;

        /*
         * Debug related RC.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Single step an instruction.
             */
            case VINF_EM_DBG_STEP:
                if (   pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
                    || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
                    || pVCpu->em.s.fForceRAW /* paranoia */)
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawStep(pVM, pVCpu);
#else
                    AssertLogRelMsgFailedStmt(("Bad EM state."), VERR_EM_INTERNAL_ERROR);
#endif
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
                    rc = EMR3HmSingleInstruction(pVM, pVCpu, 0 /*fFlags*/);
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_NEM)
                    rc = VBOXSTRICTRC_TODO(emR3NemSingleInstruction(pVM, pVCpu, 0 /*fFlags*/));
#ifdef VBOX_WITH_REM
                else if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM)
                    rc = emR3RemStep(pVM, pVCpu);
#endif
                else
                {
                    /* Fall back on single stepping via the interpreter. */
                    rc = IEMExecOne(pVCpu); /** @todo add dedicated interface... */
                    if (rc == VINF_SUCCESS || rc == VINF_EM_RESCHEDULE)
                        rc = VINF_EM_DBG_STEPPED;
                }
                break;

            /*
             * Simple events: stepped, breakpoint, stop/assertion.
             */
            case VINF_EM_DBG_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
                break;

            case VINF_EM_DBG_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
                break;

            case VINF_EM_DBG_STOP:
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
                break;

            case VINF_EM_DBG_EVENT:
                rc = DBGFR3EventHandlePending(pVM, pVCpu);
                break;

            case VINF_EM_DBG_HYPER_STEPPED:
                rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
                break;

            case VINF_EM_DBG_HYPER_BREAKPOINT:
                rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
                break;

            case VINF_EM_DBG_HYPER_ASSERTION:
                RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                RTLogFlush(NULL);
                rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
                break;

            /*
             * Guru meditation.
             */
            case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
                break;
            case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
                break;
            case VINF_EM_TRIPLE_FAULT: /** @todo Make a guru meditation event! */
                rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VINF_EM_TRIPLE_FAULT", 0, NULL, NULL);
                break;

            default: /** @todo don't use default for guru, but make special errors code! */
            {
                LogRel(("emR3Debug: rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
                rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
                break;
            }
        }

        /*
         * Process the result.
         */
        switch (VBOXSTRICTRC_VAL(rc))
        {
            /*
             * Continue the debugging loop.
             */
            case VINF_EM_DBG_STEP:
            case VINF_EM_DBG_STOP:
            case VINF_EM_DBG_EVENT:
            case VINF_EM_DBG_STEPPED:
            case VINF_EM_DBG_BREAKPOINT:
            case VINF_EM_DBG_HYPER_STEPPED:
            case VINF_EM_DBG_HYPER_BREAKPOINT:
            case VINF_EM_DBG_HYPER_ASSERTION:
                break;

            /*
             * Resuming execution (in some form) has to be done here if we got
             * a hypervisor debug event.
             */
            case VINF_SUCCESS:
            case VINF_EM_RESUME:
            case VINF_EM_SUSPEND:
            case VINF_EM_RESCHEDULE:
            case VINF_EM_RESCHEDULE_RAW:
            case VINF_EM_RESCHEDULE_REM:
            case VINF_EM_HALT:
                if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
                {
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawResumeHyper(pVM, pVCpu);
                    if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
                        continue;
#else
                    AssertLogRelMsgFailedReturn(("Not implemented\n"), VERR_EM_INTERNAL_ERROR);
#endif
                }
                if (rc == VINF_SUCCESS)
                    rc = VINF_EM_RESCHEDULE;
                return rc;

            /*
             * The debugger isn't attached.
             * We'll simply turn the thing off since that's the easiest thing to do.
             */
            case VERR_DBGF_NOT_ATTACHED:
                /* A few of the original codes must be propagated instead of being
                   swallowed by the power-off. */
                switch (VBOXSTRICTRC_VAL(rcLast))
                {
                    case VINF_EM_DBG_HYPER_STEPPED:
                    case VINF_EM_DBG_HYPER_BREAKPOINT:
                    case VINF_EM_DBG_HYPER_ASSERTION:
                    case VERR_TRPM_PANIC:
                    case VERR_TRPM_DONT_PANIC:
                    case VERR_VMM_RING0_ASSERTION:
                    case VERR_VMM_HYPER_CR3_MISMATCH:
                    case VERR_VMM_RING3_CALL_DISABLED:
                        return rcLast;
                }
                return VINF_EM_OFF;

            /*
             * Status codes terminating the VM in one or another sense.
             */
            case VINF_EM_TERMINATE:
            case VINF_EM_OFF:
            case VINF_EM_RESET:
            case VINF_EM_NO_MEMORY:
            case VINF_EM_RAW_STALE_SELECTOR:
            case VINF_EM_RAW_IRET_TRAP:
            case VERR_TRPM_PANIC:
            case VERR_TRPM_DONT_PANIC:
            case VERR_IEM_INSTR_NOT_IMPLEMENTED:
            case VERR_IEM_ASPECT_NOT_IMPLEMENTED:
            case VERR_VMM_RING0_ASSERTION:
            case VERR_VMM_HYPER_CR3_MISMATCH:
            case VERR_VMM_RING3_CALL_DISABLED:
            case VERR_INTERNAL_ERROR:
            case VERR_INTERNAL_ERROR_2:
            case VERR_INTERNAL_ERROR_3:
            case VERR_INTERNAL_ERROR_4:
            case VERR_INTERNAL_ERROR_5:
            case VERR_IPE_UNEXPECTED_STATUS:
            case VERR_IPE_UNEXPECTED_INFO_STATUS:
            case VERR_IPE_UNEXPECTED_ERROR_STATUS:
                return rc;

            /*
             * The rest is unexpected, and will keep us here.
             */
            default:
                AssertMsgFailed(("Unexpected rc %Rrc!\n", VBOXSTRICTRC_VAL(rc)));
                break;
        }
    } /* debug for ever */
}
1158
1159
#if defined(VBOX_WITH_REM) || defined(DEBUG)
/**
 * Steps recompiled code.
 *
 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param pVM The cross context VM structure.
 * @param pVCpu The cross context virtual CPU structure.
 */
static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
{
    Log3(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));

# ifdef VBOX_WITH_REM
    EMRemLock(pVM);

    /*
     * Switch to REM, step instruction, switch back.
     */
    int rc = REMR3State(pVM, pVCpu);
    if (RT_SUCCESS(rc))
    {
        rc = REMR3Step(pVM, pVCpu);
        /* Always sync the state back, even if the step itself failed. */
        REMR3StateBack(pVM, pVCpu);
    }
    EMRemUnlock(pVM);

# else
    /* No recompiler available: single step via the interpreter instead. */
    int rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu)); NOREF(pVM);
# endif

    Log3(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
    return rc;
}
#endif /* VBOX_WITH_REM || DEBUG */
1196
1197
1198#ifdef VBOX_WITH_REM
1199/**
1200 * emR3RemExecute helper that syncs the state back from REM and leave the REM
1201 * critical section.
1202 *
1203 * @returns false - new fInREMState value.
1204 * @param pVM The cross context VM structure.
1205 * @param pVCpu The cross context virtual CPU structure.
1206 */
1207DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
1208{
1209 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
1210 REMR3StateBack(pVM, pVCpu);
1211 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
1212
1213 EMRemUnlock(pVM);
1214 return false;
1215}
1216#endif
1217
1218
/**
 * Executes recompiled code.
 *
 * This function contains the recompiler version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
 *          VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 *
 */
static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
{
#ifdef LOG_ENABLED
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    uint32_t cpl = CPUMGetGuestCPL(pVCpu);

    if (pCtx->eflags.Bits.u1VM)
        Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs.Sel, pCtx->eip, pCtx->eflags.Bits.u1IF));
    else
        Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
#endif
    STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);

#if defined(VBOX_STRICT) && defined(DEBUG_bird)
    AssertMsg(   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
              || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo @bugref{1419} - get flat address. */
              ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
#endif

    /*
     * Spin till we get a forced action which returns anything but VINF_SUCCESS
     * or the REM suggests raw-mode execution.
     */
    *pfFFDone = false;
#ifdef VBOX_WITH_REM
    bool fInREMState = false; /* True while the REM copy of the state is current and the REM lock is held. */
#else
    uint32_t cLoops = 0; /* Iteration counter used to pace rescheduling checks (every 8th pass). */
#endif
    int rc = VINF_SUCCESS;
    for (;;)
    {
#ifdef VBOX_WITH_REM
        /*
         * Lock REM and update the state if not already in sync.
         *
         * Note! Big lock, but you are not supposed to own any lock when
         *       coming in here.
         */
        if (!fInREMState)
        {
            EMRemLock(pVM);
            STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);

            /* Flush the recompiler translation blocks if the VCPU has changed,
               also force a full CPU state resync. */
            if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
            {
                REMFlushTBs(pVM);
                CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
            }
            pVM->em.s.idLastRemCpu = pVCpu->idCpu;

            rc = REMR3State(pVM, pVCpu);

            STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
            if (RT_FAILURE(rc))
                break;
            fInREMState = true;

            /*
             * We might have missed the raising of VMREQ, TIMER and some other
             * important FFs while we were busy switching the state. So, check again.
             */
            if (    VM_FF_IS_PENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
                ||  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
            {
                LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
                goto l_REMDoForcedActions;
            }
        }
#endif

        /*
         * Execute REM.
         */
        if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
        {
            STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
#ifdef VBOX_WITH_REM
            rc = REMR3Run(pVM, pVCpu);
#else
            rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
#endif
            STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
        }
        else
        {
            /* Give up this time slice; virtual time continues */
            STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
            RTThreadSleep(5);
            STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
            rc = VINF_SUCCESS;
        }

        /*
         * Deal with high priority post execution FFs before doing anything
         * else. Sync back the state and leave the lock to be on the safe side.
         */
        if (    VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            ||  VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
        {
#ifdef VBOX_WITH_REM
            fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
#endif
            rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));
        }

        /*
         * Process the returned status code.
         */
        if (rc != VINF_SUCCESS)
        {
            /* EM status codes are handled by the outer loop in EMR3ExecuteVM(). */
            if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
                break;
            if (rc != VINF_REM_INTERRUPED_FF)
            {
#ifndef VBOX_WITH_REM
                /* Try dodge unimplemented IEM trouble by rescheduling. */
                if (   rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
                    || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
                {
                    EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
                    if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
                    {
                        rc = VINF_EM_RESCHEDULE;
                        break;
                    }
                }
#endif

                /*
                 * Anything which is not known to us means an internal error
                 * and the termination of the VM!
                 */
                AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
                break;
            }
        }


        /*
         * Check and execute forced actions.
         *
         * Sync back the VM state and leave the lock before calling any of
         * these, you never know what's going to happen here.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPollVoid(pVM, pVCpu);
#endif
        AssertCompile(VMCPU_FF_ALL_REM_MASK & VMCPU_FF_TIMER);
        if (    VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
            ||  VMCPU_FF_IS_PENDING(pVCpu,
                                     VMCPU_FF_ALL_REM_MASK
                                   & VM_WHEN_RAW_MODE(~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE), UINT32_MAX)) )
        {
#ifdef VBOX_WITH_REM
l_REMDoForcedActions: /* Jumped to from the post-REMR3State FF recheck above; state must be synced back first. */
            if (fInREMState)
                fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
#endif
            STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
            rc = emR3ForcedActions(pVM, pVCpu, rc);
            VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
            STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
            if (    rc != VINF_SUCCESS
                &&  rc != VINF_EM_RESCHEDULE_REM)
            {
                *pfFFDone = true;
                break;
            }
        }

#ifndef VBOX_WITH_REM
        /*
         * Have to check if we can get back to fast execution mode every so often.
         */
        if (!(++cLoops & 7))
        {
            EMSTATE enmCheck = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
            if (   enmCheck != EMSTATE_REM
                && enmCheck != EMSTATE_IEM_THEN_REM)
                return VINF_EM_RESCHEDULE; /* NOTE(review): returns without STAM_REL_PROFILE_ADV_STOP on StatREMTotal - verify this is intentional. */
        }
#endif

    } /* The Inner Loop, recompiled execution mode version. */


#ifdef VBOX_WITH_REM
    /*
     * Returning. Sync back the VM state if required.
     */
    if (fInREMState)
        fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
#endif

    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
    return rc;
}
1434
1435
#ifdef DEBUG

/**
 * Single steps the guest using the recompiler (debug builds only).
 *
 * Temporarily switches the EM state to EMSTATE_DEBUG_GUEST_REM, steps up to
 * @a cIterations instructions (logging and disassembling each one), then
 * clears the trap flag and restores the previous EM state.
 *
 * @returns VINF_EM_RESCHEDULE.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   cIterations The maximum number of instructions to step.
 */
int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
{
    /* Remember the current state so it can be restored when we're done. */
    EMSTATE const enmSavedState = pVCpu->em.s.enmState;
    pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;

    Log(("Single step BEGIN:\n"));
    uint32_t cStepsLeft = cIterations;
    while (cStepsLeft-- > 0)
    {
        DBGFR3PrgStep(pVCpu);
        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, "RSS");
        emR3RemStep(pVM, pVCpu);
        /* Stop stepping as soon as the scheduler no longer selects REM. */
        if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
            break;
    }
    Log(("Single step END:\n"));

    /* Clear the trap flag and restore the previous execution state. */
    CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
    pVCpu->em.s.enmState = enmSavedState;
    return VINF_EM_RESCHEDULE;
}

#endif /* DEBUG */
1460
1461
1462/**
1463 * Try execute the problematic code in IEM first, then fall back on REM if there
1464 * is too much of it or if IEM doesn't implement something.
1465 *
1466 * @returns Strict VBox status code from IEMExecLots.
1467 * @param pVM The cross context VM structure.
1468 * @param pVCpu The cross context virtual CPU structure of the calling EMT.
1469 * @param pfFFDone Force flags done indicator.
1470 *
1471 * @thread EMT(pVCpu)
1472 */
1473static VBOXSTRICTRC emR3ExecuteIemThenRem(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1474{
1475 LogFlow(("emR3ExecuteIemThenRem: %04x:%RGv\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestRIP(pVCpu)));
1476 *pfFFDone = false;
1477
1478 /*
1479 * Execute in IEM for a while.
1480 */
1481 while (pVCpu->em.s.cIemThenRemInstructions < 1024)
1482 {
1483 uint32_t cInstructions;
1484 VBOXSTRICTRC rcStrict = IEMExecLots(pVCpu, &cInstructions);
1485 pVCpu->em.s.cIemThenRemInstructions += cInstructions;
1486 if (rcStrict != VINF_SUCCESS)
1487 {
1488 if ( rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
1489 || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
1490 break;
1491
1492 Log(("emR3ExecuteIemThenRem: returns %Rrc after %u instructions\n",
1493 VBOXSTRICTRC_VAL(rcStrict), pVCpu->em.s.cIemThenRemInstructions));
1494 return rcStrict;
1495 }
1496
1497 EMSTATE enmNewState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1498 if (enmNewState != EMSTATE_REM && enmNewState != EMSTATE_IEM_THEN_REM)
1499 {
1500 LogFlow(("emR3ExecuteIemThenRem: -> %d (%s) after %u instructions\n",
1501 enmNewState, emR3GetStateName(enmNewState), pVCpu->em.s.cIemThenRemInstructions));
1502 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1503 pVCpu->em.s.enmState = enmNewState;
1504 return VINF_SUCCESS;
1505 }
1506
1507 /*
1508 * Check for pending actions.
1509 */
1510 if ( VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
1511 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT))
1512 return VINF_SUCCESS;
1513 }
1514
1515 /*
1516 * Switch to REM.
1517 */
1518 Log(("emR3ExecuteIemThenRem: -> EMSTATE_REM (after %u instructions)\n", pVCpu->em.s.cIemThenRemInstructions));
1519 pVCpu->em.s.enmState = EMSTATE_REM;
1520 return VINF_SUCCESS;
1521}
1522
1523
/**
 * Decides whether to execute RAW, HWACC or REM.
 *
 * Selects the execution engine (HM, NEM, raw-mode, IEM or the recompiler)
 * that is both safe and fastest for the current guest CPU state.  The raw-mode
 * checks below are ordered by likelihood and must be kept in sync with the
 * recompiler (see the remR3CanExecuteRaw notes).
 *
 * @returns new EM state
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   pCtx    Pointer to the guest CPU context.
 */
EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /*
     * When forcing raw-mode execution, things are simple.
     */
    if (pVCpu->em.s.fForceRAW)
        return EMSTATE_RAW;

    /*
     * We stay in the wait for SIPI state unless explicitly told otherwise.
     */
    if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
        return EMSTATE_WAIT_SIPI;

    /*
     * Execute everything in IEM?
     */
    if (pVM->em.s.fIemExecutesAll)
        return EMSTATE_IEM;

    /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
    /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
    /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */

    X86EFLAGS EFlags = pCtx->eflags;
    if (!VM_IS_RAW_MODE_ENABLED(pVM))
    {
        if (EMIsHwVirtExecutionEnabled(pVM))
        {
            if (VM_IS_HM_ENABLED(pVM))
            {
                if (HMR3CanExecuteGuest(pVM, pCtx))
                    return EMSTATE_HM;
            }
            else if (NEMR3CanExecuteGuest(pVM, pVCpu, pCtx))
                return EMSTATE_NEM;

            /*
             * Note! Raw mode and hw accelerated mode are incompatible. The latter
             *       turns off monitoring features essential for raw mode!
             */
            return EMSTATE_IEM_THEN_REM;
        }
    }

    /*
     * Standard raw-mode:
     *
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelihood of being true during normal execution.
     */
    if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
    {
        Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
        return EMSTATE_REM;
    }

# ifndef VBOX_RAW_V86
    /* Virtual-8086 mode is not supported in raw mode unless built with VBOX_RAW_V86. */
    if (EFlags.u32 & X86_EFL_VM) {
        Log2(("raw mode refused: VM_MASK\n"));
        return EMSTATE_REM;
    }
# endif

    /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
    uint32_t u32CR0 = pCtx->cr0;
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        /* Raw mode requires paged protected mode (both PG and PE set). */
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return EMSTATE_REM;
    }

    /* PAE paging is only acceptable if the CPU profile actually reports PAE support to the guest. */
    if (pCtx->cr4 & X86_CR4_PAE)
    {
        uint32_t u32Dummy, u32Features;

        CPUMGetGuestCpuId(pVCpu, 1, 0, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
        if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
            return EMSTATE_REM;
    }

    unsigned uSS = pCtx->ss.Sel;
    if (    pCtx->eflags.Bits.u1VM
        ||  (uSS & X86_SEL_RPL) == 3)
    {
        /* Ring-3 (or V86) guest code. */
        if (!EMIsRawRing3Enabled(pVM))
            return EMSTATE_REM;

        if (!(EFlags.u32 & X86_EFL_IF))
        {
            Log2(("raw mode refused: IF (RawR3)\n"));
            return EMSTATE_REM;
        }

        if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
        {
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return EMSTATE_REM;
        }
    }
    else
    {
        /* Supervisor (ring 0/1) guest code. */
        if (!EMIsRawRing0Enabled(pVM))
            return EMSTATE_REM;

        if (EMIsRawRing1Enabled(pVM))
        {
            /* Only ring 0 and 1 supervisor code. */
            if ((uSS & X86_SEL_RPL) == 2) /* ring 1 code is moved into ring 2, so we can't support ring-2 in that case. */
            {
                Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
                return EMSTATE_REM;
            }
        }
        /* Only ring 0 supervisor code. */
        else if ((uSS & X86_SEL_RPL) != 0)
        {
            Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
            return EMSTATE_REM;
        }

        // Let's start with pure 32 bits ring 0 code first
        /** @todo What's pure 32-bit mode? flat? */
        if (    !(pCtx->ss.Attr.n.u1DefBig)
            ||  !(pCtx->cs.Attr.n.u1DefBig))
        {
            Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
            return EMSTATE_REM;
        }

        /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
        if (!(u32CR0 & X86_CR0_WP))
        {
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return EMSTATE_REM;
        }

# ifdef VBOX_WITH_RAW_MODE
        /* PATM patch code must always run in raw mode. */
        if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
        {
            Log2(("raw r0 mode forced: patch code\n"));
# ifdef VBOX_WITH_SAFE_STR
            Assert(pCtx->tr.Sel);
# endif
            return EMSTATE_RAW;
        }
# endif /* VBOX_WITH_RAW_MODE */

# if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(EFlags.u32 & X86_EFL_IF))
        {
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return EMSTATE_REM;
        }
# endif

# ifndef VBOX_WITH_RAW_RING1
        /** @todo still necessary??? */
        if (EFlags.Bits.u2IOPL != 0)
        {
            Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
            return EMSTATE_REM;
        }
# endif
    }

    /*
     * Stale hidden selectors means raw-mode is unsafe (being very careful).
     */
    if (pCtx->cs.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale CS\n"));
        return EMSTATE_REM;
    }
    if (pCtx->ss.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale SS\n"));
        return EMSTATE_REM;
    }
    if (pCtx->ds.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale DS\n"));
        return EMSTATE_REM;
    }
    if (pCtx->es.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale ES\n"));
        return EMSTATE_REM;
    }
    if (pCtx->fs.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale FS\n"));
        return EMSTATE_REM;
    }
    if (pCtx->gs.fFlags & CPUMSELREG_FLAGS_STALE)
    {
        Log2(("raw mode refused: stale GS\n"));
        return EMSTATE_REM;
    }

# ifdef VBOX_WITH_SAFE_STR
    if (pCtx->tr.Sel == 0)
    {
        Log(("Raw mode refused -> TR=0\n"));
        return EMSTATE_REM;
    }
# endif

    /*Assert(PGMPhysIsA20Enabled(pVCpu));*/
    return EMSTATE_RAW;
}
1746
1747
/**
 * Executes all high priority post execution force actions.
 *
 * @returns Strict VBox status code.  Typically @a rc, but may be upgraded to
 *          fatal error status code.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 * @param   rc      The current strict VBox status code rc.
 */
VBOXSTRICTRC emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc)
{
    VBOXVMM_EM_FF_HIGH(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, VBOXSTRICTRC_VAL(rc));

    /* Pending PDM critical section leave/enter work. */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
        PDMCritSectBothFF(pVCpu);

    /* Update CR3 (Nested Paging case for HM). */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
    {
        CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
        int rc2 = PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
        if (RT_FAILURE(rc2))
            return rc2;
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
    }

    /* Update PAE PDPEs. This must be done *after* PGMUpdateCR3() and used only by the Nested Paging case for HM. */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
    {
        CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4 | CPUMCTX_EXTRN_EFER, rc);
        if (CPUMIsGuestInPAEMode(pVCpu))
        {
            PX86PDPE pPdpes = HMGetPaePdpes(pVCpu);
            AssertPtr(pPdpes);

            PGMGstUpdatePaePdpes(pVCpu, pPdpes);
            Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
        }
        else
            /* The guest left PAE mode; the flag is stale, just drop it. */
            VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
    }

    /* IEM has pending work (typically memory write after INS instruction). */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IEM))
        rc = IEMR3ProcessForceFlag(pVM, pVCpu, rc);

    /* IOM has pending work (committing an I/O or MMIO write). */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_IOM))
    {
        rc = IOMR3ProcessForceFlag(pVM, pVCpu, rc);
        /* If an exit record continuation is armed, either resume the exit
           history optimized execution or disarm the record on non-success. */
        if (pVCpu->em.s.idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
        { /* half likely, or at least it's a line shorter. */ }
        else if (rc == VINF_SUCCESS)
            rc = VINF_EM_RESUME_R3_HISTORY_EXEC;
        else
            pVCpu->em.s.idxContinueExitRec = UINT16_MAX;
    }

#ifdef VBOX_WITH_RAW_MODE
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
        CSAMR3DoPendingAction(pVM, pVCpu);
#endif

    if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
    {
        /* Out of memory overrides any informational EM status code. */
        if (    rc > VINF_EM_NO_MEMORY
            &&  rc <= VINF_EM_LAST)
            rc = VINF_EM_NO_MEMORY;
    }

    return rc;
}
1821
#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
/**
 * Helper for emR3ForcedActions() for injecting interrupts into the
 * nested-guest.
 *
 * @returns VBox status code.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pCtx        Pointer to the nested-guest CPU context.
 * @param   pfResched   Where to store whether a reschedule is required.
 * @param   pfInject    Where to store whether an interrupt was injected (and if
 *                      a wake up is pending).
 */
static int emR3NstGstInjectIntr(PVMCPU pVCpu, PCPUMCTX pCtx, bool *pfResched, bool *pfInject)
{
    *pfResched = false;
    *pfInject = false;
    if (CPUMIsGuestInSvmNestedHwVirtMode(pCtx))
    {
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        Assert(pCtx->hwvirt.fGif);
        bool fVirtualGif = CPUMGetSvmNstGstVGif(pCtx);
#ifdef VBOX_WITH_RAW_MODE
        /* Treat GIF as clear while executing PATM patch code. */
        fVirtualGif &= !PATMIsPatchGCAddr(pVM, pCtx->eip);
#endif
        if (fVirtualGif)
        {
            /*
             * Physical (INTR) interrupts first.
             */
            if (CPUMCanSvmNstGstTakePhysIntr(pVCpu, pCtx))
            {
                Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
                if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
                {
                    /* If the nested-guest intercepts INTR, cause an #VMEXIT instead of injecting. */
                    if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_INTR))
                    {
                        VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_INTR, 0, 0);
                        if (RT_SUCCESS(rcStrict))
                        {
                            /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
                             *        doesn't intercept HLT but intercepts INTR? */
                            *pfResched = true;
                            Assert(rcStrict != VINF_PGM_CHANGE_MODE);
                            if (rcStrict == VINF_SVM_VMEXIT)
                                return VINF_SUCCESS;
                            return VBOXSTRICTRC_VAL(rcStrict);
                        }

                        AssertMsgFailed(("INTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                        return VINF_EM_TRIPLE_FAULT;
                    }

                    /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
                    /** @todo this really isn't nice, should properly handle this */
                    int rc = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
                    Assert(rc != VINF_PGM_CHANGE_MODE);
                    if (rc == VINF_SVM_VMEXIT)
                        rc = VINF_SUCCESS;
                    /* When IEM executes everything, collapse the engine-specific reschedule codes into the generic one. */
                    if (pVM->em.s.fIemExecutesAll && (    rc == VINF_EM_RESCHEDULE_REM
                                                      ||  rc == VINF_EM_RESCHEDULE_HM
                                                      ||  rc == VINF_EM_RESCHEDULE_RAW))
                    {
                        rc = VINF_EM_RESCHEDULE;
                    }

                    *pfResched = true;
                    *pfInject = true;
                    return rc;
                }
            }

            /*
             * Virtual interrupts (V_IRQ) next.
             */
            if (   VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST)
                && CPUMCanSvmNstGstTakeVirtIntr(pVCpu, pCtx))
            {
                /* If the nested-guest intercepts VINTR, cause an #VMEXIT instead of injecting. */
                if (CPUMIsGuestSvmCtrlInterceptSet(pVCpu, pCtx, SVM_CTRL_INTERCEPT_VINTR))
                {
                    VBOXSTRICTRC rcStrict = IEMExecSvmVmexit(pVCpu, SVM_EXIT_VINTR, 0, 0);
                    if (RT_SUCCESS(rcStrict))
                    {
                        /** @todo r=ramshankar: Do we need to signal a wakeup here? If a nested-guest
                         *        doesn't intercept HLT but intercepts VINTR? */
                        *pfResched = true;
                        Assert(rcStrict != VINF_PGM_CHANGE_MODE);
                        if (rcStrict == VINF_SVM_VMEXIT)
                            return VINF_SUCCESS;
                        return VBOXSTRICTRC_VAL(rcStrict);
                    }

                    AssertMsgFailed(("VINTR #VMEXIT failed! rc=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                    return VINF_EM_TRIPLE_FAULT;
                }

                VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NESTED_GUEST);
                uint8_t const uNstGstVector = CPUMGetSvmNstGstInterrupt(pCtx);
                AssertMsg(uNstGstVector > 0 && uNstGstVector <= X86_XCPT_LAST, ("Invalid VINTR vector %#x\n", uNstGstVector));
                TRPMAssertTrap(pVCpu, uNstGstVector, TRPM_HARDWARE_INT);
                Log(("EM: Asserting nested-guest virt. hardware intr: %#x\n", uNstGstVector));

                *pfResched = true;
                *pfInject = true;
                return VINF_EM_RESCHEDULE;
            }
        }
        return VINF_SUCCESS;
    }

    if (CPUMIsGuestInVmxNestedHwVirtMode(pCtx))
    { /** @todo Nested VMX. */ }

    /* Shouldn't really get here. */
    AssertMsgFailed(("Unrecognized nested hwvirt. arch!\n"));
    return VERR_EM_INTERNAL_ERROR;
}
#endif
1933
1934/**
1935 * Executes all pending forced actions.
1936 *
1937 * Forced actions can cause execution delays and execution
1938 * rescheduling. The first we deal with using action priority, so
1939 * that for instance pending timers aren't scheduled and ran until
1940 * right before execution. The rescheduling we deal with using
1941 * return codes. The same goes for VM termination, only in that case
1942 * we exit everything.
1943 *
1944 * @returns VBox status code of equal or greater importance/severity than rc.
1945 * The most important ones are: VINF_EM_RESCHEDULE,
1946 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1947 *
1948 * @param pVM The cross context VM structure.
1949 * @param pVCpu The cross context virtual CPU structure.
1950 * @param rc The current rc.
1951 *
1952 */
1953int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1954{
1955 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1956#ifdef VBOX_STRICT
1957 int rcIrq = VINF_SUCCESS;
1958#endif
1959 int rc2;
1960#define UPDATE_RC() \
1961 do { \
1962 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1963 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1964 break; \
1965 if (!rc || rc2 < rc) \
1966 rc = rc2; \
1967 } while (0)
1968 VBOXVMM_EM_FF_ALL(pVCpu, pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions, rc);
1969
1970 /*
1971 * Post execution chunk first.
1972 */
1973 if ( VM_FF_IS_PENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1974 || (VMCPU_FF_NORMAL_PRIORITY_POST_MASK && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK)) )
1975 {
1976 /*
1977 * EMT Rendezvous (must be serviced before termination).
1978 */
1979 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1980 {
1981 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
1982 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1983 UPDATE_RC();
1984 /** @todo HACK ALERT! The following test is to make sure EM+TM
1985 * thinks the VM is stopped/reset before the next VM state change
1986 * is made. We need a better solution for this, or at least make it
1987 * possible to do: (rc >= VINF_EM_FIRST && rc <=
1988 * VINF_EM_SUSPEND). */
1989 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1990 {
1991 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1992 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1993 return rc;
1994 }
1995 }
1996
1997 /*
1998 * State change request (cleared by vmR3SetStateLocked).
1999 */
2000 if (VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2001 {
2002 VMSTATE enmState = VMR3GetState(pVM);
2003 switch (enmState)
2004 {
2005 case VMSTATE_FATAL_ERROR:
2006 case VMSTATE_FATAL_ERROR_LS:
2007 case VMSTATE_GURU_MEDITATION:
2008 case VMSTATE_GURU_MEDITATION_LS:
2009 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2010 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2011 return VINF_EM_SUSPEND;
2012
2013 case VMSTATE_DESTROYING:
2014 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2015 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2016 return VINF_EM_TERMINATE;
2017
2018 default:
2019 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2020 }
2021 }
2022
2023 /*
2024 * Debugger Facility polling.
2025 */
2026 if ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2027 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2028 {
2029 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2030 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2031 UPDATE_RC();
2032 }
2033
2034 /*
2035 * Postponed reset request.
2036 */
2037 if (VM_FF_TEST_AND_CLEAR(pVM, VM_FF_RESET))
2038 {
2039 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2040 rc2 = VBOXSTRICTRC_TODO(VMR3ResetFF(pVM));
2041 UPDATE_RC();
2042 }
2043
2044#ifdef VBOX_WITH_RAW_MODE
2045 /*
2046 * CSAM page scanning.
2047 */
2048 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2049 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
2050 {
2051 /** @todo check for 16 or 32 bits code! (D bit in the code selector) */
2052 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
2053 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2054 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2055 CSAMR3CheckCodeEx(pVM, pCtx, pCtx->eip);
2056 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
2057 }
2058#endif
2059
2060 /*
2061 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
2062 */
2063 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2064 {
2065 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2066 UPDATE_RC();
2067 if (rc == VINF_EM_NO_MEMORY)
2068 return rc;
2069 }
2070
2071 /* check that we got them all */
2072 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2073 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == (VM_WHEN_RAW_MODE(VMCPU_FF_CSAM_SCAN_PAGE, 0) | VMCPU_FF_DBGF));
2074 }
2075
2076 /*
2077 * Normal priority then.
2078 * (Executed in no particular order.)
2079 */
2080 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
2081 {
2082 /*
2083 * PDM Queues are pending.
2084 */
2085 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
2086 PDMR3QueueFlushAll(pVM);
2087
2088 /*
2089 * PDM DMA transfers are pending.
2090 */
2091 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
2092 PDMR3DmaRun(pVM);
2093
2094 /*
2095 * EMT Rendezvous (make sure they are handled before the requests).
2096 */
2097 if (VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2098 {
2099 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2100 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2101 UPDATE_RC();
2102 /** @todo HACK ALERT! The following test is to make sure EM+TM
2103 * thinks the VM is stopped/reset before the next VM state change
2104 * is made. We need a better solution for this, or at least make it
2105 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2106 * VINF_EM_SUSPEND). */
2107 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2108 {
2109 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2110 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2111 return rc;
2112 }
2113 }
2114
2115 /*
2116 * Requests from other threads.
2117 */
2118 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
2119 {
2120 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2121 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY, false /*fPriorityOnly*/);
2122 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
2123 {
2124 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2125 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2126 return rc2;
2127 }
2128 UPDATE_RC();
2129 /** @todo HACK ALERT! The following test is to make sure EM+TM
2130 * thinks the VM is stopped/reset before the next VM state change
2131 * is made. We need a better solution for this, or at least make it
2132 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2133 * VINF_EM_SUSPEND). */
2134 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2135 {
2136 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2137 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2138 return rc;
2139 }
2140 }
2141
2142#ifdef VBOX_WITH_REM
2143 /* Replay the handler notification changes. */
2144 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
2145 {
2146 /* Try not to cause deadlocks. */
2147 if ( pVM->cCpus == 1
2148 || ( !PGMIsLockOwner(pVM)
2149 && !IOMIsLockWriteOwner(pVM))
2150 )
2151 {
2152 EMRemLock(pVM);
2153 REMR3ReplayHandlerNotifications(pVM);
2154 EMRemUnlock(pVM);
2155 }
2156 }
2157#endif
2158
2159 /* check that we got them all */
2160 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
2161 }
2162
2163 /*
2164 * Normal priority then. (per-VCPU)
2165 * (Executed in no particular order.)
2166 */
2167 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2168 && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
2169 {
2170 /*
2171 * Requests from other threads.
2172 */
2173 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
2174 {
2175 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2176 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu, false /*fPriorityOnly*/);
2177 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
2178 {
2179 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
2180 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2181 return rc2;
2182 }
2183 UPDATE_RC();
2184 /** @todo HACK ALERT! The following test is to make sure EM+TM
2185 * thinks the VM is stopped/reset before the next VM state change
2186 * is made. We need a better solution for this, or at least make it
2187 * possible to do: (rc >= VINF_EM_FIRST && rc <=
2188 * VINF_EM_SUSPEND). */
2189 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2190 {
2191 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2192 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2193 return rc;
2194 }
2195 }
2196
2197 /* check that we got them all */
2198 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~VMCPU_FF_REQUEST));
2199 }
2200
2201 /*
2202 * High priority pre execution chunk last.
2203 * (Executed in ascending priority order.)
2204 */
2205 if ( VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
2206 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
2207 {
2208 /*
2209 * Timers before interrupts.
2210 */
2211 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TIMER)
2212 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2213 TMR3TimerQueuesDo(pVM);
2214
2215 /*
2216 * Pick up asynchronously posted interrupts into the APIC.
2217 */
2218 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
2219 APICUpdatePendingInterrupts(pVCpu);
2220
2221 /*
2222 * The instruction following an emulated STI should *always* be executed!
2223 *
2224 * Note! We intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if
2225 * the eip is the same as the inhibited instr address. Before we
2226 * are able to execute this instruction in raw mode (iret to
2227 * guest code) an external interrupt might force a world switch
2228 * again. Possibly allowing a guest interrupt to be dispatched
2229 * in the process. This could break the guest. Sounds very
2230 * unlikely, but such timing sensitive problem are not as rare as
2231 * you might think.
2232 */
2233 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2234 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2235 {
2236 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RIP);
2237 if (CPUMGetGuestRIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
2238 {
2239 Log(("Clearing VMCPU_FF_INHIBIT_INTERRUPTS at %RGv - successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
2240 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2241 }
2242 else
2243 Log(("Leaving VMCPU_FF_INHIBIT_INTERRUPTS set at %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu)));
2244 }
2245
2246 /*
2247 * Interrupts.
2248 */
2249 bool fWakeupPending = false;
2250 if ( !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)
2251 && (!rc || rc >= VINF_EM_RESCHEDULE_HM))
2252 {
2253 if ( !VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
2254 && !TRPMHasTrap(pVCpu)) /* an interrupt could already be scheduled for dispatching in the recompiler. */
2255 {
2256 Assert(!HMR3IsEventPending(pVCpu));
2257 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
2258#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2259 if (CPUMIsGuestInNestedHwVirtMode(pCtx))
2260 {
2261 bool fResched, fInject;
2262 rc2 = emR3NstGstInjectIntr(pVCpu, pCtx, &fResched, &fInject);
2263 if (fInject)
2264 {
2265 fWakeupPending = true;
2266# ifdef VBOX_STRICT
2267 rcIrq = rc2;
2268# endif
2269 }
2270 if (fResched)
2271 UPDATE_RC();
2272 }
2273 else
2274#endif
2275 {
2276 CPUM_ASSERT_NOT_EXTRN(pVCpu, CPUMCTX_EXTRN_RFLAGS);
2277 if ( VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)
2278#ifdef VBOX_WITH_NESTED_HWVIRT_SVM
2279 && pCtx->hwvirt.fGif
2280#endif
2281#ifdef VBOX_WITH_RAW_MODE
2282 && !PATMIsPatchGCAddr(pVM, pCtx->eip)
2283#endif
2284 && pCtx->eflags.Bits.u1IF)
2285 {
2286 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
2287 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
2288 /** @todo this really isn't nice, should properly handle this */
2289 CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_XCPT_MASK);
2290 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
2291 Log(("EM: TRPMR3InjectEvent -> %d\n", rc2));
2292 if (pVM->em.s.fIemExecutesAll && ( rc2 == VINF_EM_RESCHEDULE_REM
2293 || rc2 == VINF_EM_RESCHEDULE_HM
2294 || rc2 == VINF_EM_RESCHEDULE_RAW))
2295 {
2296 rc2 = VINF_EM_RESCHEDULE;
2297 }
2298#ifdef VBOX_STRICT
2299 rcIrq = rc2;
2300#endif
2301 UPDATE_RC();
2302 /* Reschedule required: We must not miss the wakeup below! */
2303 fWakeupPending = true;
2304 }
2305 }
2306 }
2307 }
2308
2309 /*
2310 * Allocate handy pages.
2311 */
2312 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
2313 {
2314 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2315 UPDATE_RC();
2316 }
2317
2318 /*
2319 * Debugger Facility request.
2320 */
2321 if ( ( VM_FF_IS_PENDING(pVM, VM_FF_DBGF)
2322 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_DBGF) )
2323 && !VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY) )
2324 {
2325 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2326 rc2 = DBGFR3VMMForcedAction(pVM, pVCpu);
2327 UPDATE_RC();
2328 }
2329
2330 /*
2331 * EMT Rendezvous (must be serviced before termination).
2332 */
2333 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2334 && VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS))
2335 {
2336 CPUM_IMPORT_EXTRN_RCSTRICT(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK, rc);
2337 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
2338 UPDATE_RC();
2339 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
2340 * stopped/reset before the next VM state change is made. We need a better
2341 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
2342 * && rc >= VINF_EM_SUSPEND). */
2343 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
2344 {
2345 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2346 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2347 return rc;
2348 }
2349 }
2350
2351 /*
2352 * State change request (cleared by vmR3SetStateLocked).
2353 */
2354 if ( !fWakeupPending /* don't miss the wakeup from EMSTATE_HALTED! */
2355 && VM_FF_IS_PENDING(pVM, VM_FF_CHECK_VM_STATE))
2356 {
2357 VMSTATE enmState = VMR3GetState(pVM);
2358 switch (enmState)
2359 {
2360 case VMSTATE_FATAL_ERROR:
2361 case VMSTATE_FATAL_ERROR_LS:
2362 case VMSTATE_GURU_MEDITATION:
2363 case VMSTATE_GURU_MEDITATION_LS:
2364 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
2365 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2366 return VINF_EM_SUSPEND;
2367
2368 case VMSTATE_DESTROYING:
2369 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
2370 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2371 return VINF_EM_TERMINATE;
2372
2373 default:
2374 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
2375 }
2376 }
2377
2378 /*
2379 * Out of memory? Since most of our fellow high priority actions may cause us
2380 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
2381 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
2382 * than us since we can terminate without allocating more memory.
2383 */
2384 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
2385 {
2386 rc2 = PGMR3PhysAllocateHandyPages(pVM);
2387 UPDATE_RC();
2388 if (rc == VINF_EM_NO_MEMORY)
2389 return rc;
2390 }
2391
2392 /*
2393 * If the virtual sync clock is still stopped, make TM restart it.
2394 */
2395 if (VM_FF_IS_PENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
2396 TMR3VirtualSyncFF(pVM, pVCpu);
2397
2398#ifdef DEBUG
2399 /*
2400 * Debug, pause the VM.
2401 */
2402 if (VM_FF_IS_PENDING(pVM, VM_FF_DEBUG_SUSPEND))
2403 {
2404 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
2405 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
2406 return VINF_EM_SUSPEND;
2407 }
2408#endif
2409
2410 /* check that we got them all */
2411 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
2412 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_UPDATE_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_INHIBIT_INTERRUPTS | VMCPU_FF_DBGF | VM_WHEN_RAW_MODE(VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT, 0)));
2413 }
2414
2415#undef UPDATE_RC
2416 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
2417 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
2418 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
2419 return rc;
2420}
2421
2422
2423/**
2424 * Check if the preset execution time cap restricts guest execution scheduling.
2425 *
2426 * @returns true if allowed, false otherwise
2427 * @param pVM The cross context VM structure.
2428 * @param pVCpu The cross context virtual CPU structure.
2429 */
2430bool emR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
2431{
2432 uint64_t u64UserTime, u64KernelTime;
2433
2434 if ( pVM->uCpuExecutionCap != 100
2435 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
2436 {
2437 uint64_t u64TimeNow = RTTimeMilliTS();
2438 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
2439 {
2440 /* New time slice. */
2441 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
2442 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
2443 pVCpu->em.s.u64TimeSliceExec = 0;
2444 }
2445 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
2446
2447 Log2(("emR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
2448 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
2449 return false;
2450 }
2451 return true;
2452}
2453
2454
2455/**
2456 * Execute VM.
2457 *
2458 * This function is the main loop of the VM. The emulation thread
2459 * calls this function when the VM has been successfully constructed
2460 * and we're ready for executing the VM.
2461 *
2462 * Returning from this function means that the VM is turned off or
2463 * suspended (state already saved) and deconstruction is next in line.
2464 *
2465 * All interaction from other thread are done using forced actions
2466 * and signaling of the wait object.
2467 *
2468 * @returns VBox status code, informational status codes may indicate failure.
2469 * @param pVM The cross context VM structure.
2470 * @param pVCpu The cross context virtual CPU structure.
2471 */
VMMR3_INT_DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
{
    Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
         pVM,
         pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
         pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
         pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
         pVCpu->em.s.fForceRAW));
    VM_ASSERT_EMT(pVM);
    AssertMsg(   pVCpu->em.s.enmState == EMSTATE_NONE
              || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
              || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
              ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));

    /* rc is zero on the normal path; a fatal longjmp from deep within the
       execution loops lands here with a non-zero status and is handled by
       the else branch at the bottom of this function. */
    int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
    if (rc == 0)
    {
        /*
         * Start the virtual time.
         */
        TMR3NotifyResume(pVM, pVCpu);

        /*
         * The Outer Main Loop.
         */
        /* fFFDone is set by the inner execution loops when they return because
           of pending forced actions; the outer loop then skips one round of
           emR3ForcedActions since the return code already reflects them. */
        bool fFFDone = false;

        /* Reschedule right away to start in the right state. */
        rc = VINF_SUCCESS;

        /* If resuming after a pause or a state load, restore the previous
           state or else we'll start executing code. Else, just reschedule. */
        if (    pVCpu->em.s.enmState == EMSTATE_SUSPENDED
            &&  (   pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
                 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
            pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
        else
            pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
        pVCpu->em.s.cIemThenRemInstructions = 0;
        Log(("EMR3ExecuteVM: enmState=%s\n", emR3GetStateName(pVCpu->em.s.enmState)));

        STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
        for (;;)
        {
            /*
             * Before we can schedule anything (we're here because
             * scheduling is required) we must service any pending
             * forced actions to avoid any pending action causing
             * immediate rescheduling upon entering an inner loop
             *
             * Do forced actions.
             */
            if (   !fFFDone
                && RT_SUCCESS(rc)
                && rc != VINF_EM_TERMINATE
                && rc != VINF_EM_OFF
                && (   VM_FF_IS_PENDING(pVM, VM_FF_ALL_REM_MASK)
                    || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_REM_MASK & ~VMCPU_FF_UNHALT)))
            {
                rc = emR3ForcedActions(pVM, pVCpu, rc);
                VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
                if (   (   rc == VINF_EM_RESCHEDULE_REM
                        || rc == VINF_EM_RESCHEDULE_HM)
                    && pVCpu->em.s.fForceRAW)
                    rc = VINF_EM_RESCHEDULE_RAW;
            }
            else if (fFFDone)
                fFFDone = false;

            /*
             * Now what to do?
             */
            Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
            EMSTATE const enmOldState = pVCpu->em.s.enmState;
            switch (rc)
            {
                /*
                 * Keep doing what we're currently doing.
                 */
                case VINF_SUCCESS:
                    break;

                /*
                 * Reschedule - to raw-mode execution.
                 */
/** @todo r=bird: consider merging VINF_EM_RESCHEDULE_RAW with VINF_EM_RESCHEDULE_HM, they serve the same purpose here at least. */
                case VINF_EM_RESCHEDULE_RAW:
                    Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
                    if (VM_IS_RAW_MODE_ENABLED(pVM))
                    {
                        Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", enmOldState, EMSTATE_RAW));
                        pVCpu->em.s.enmState = EMSTATE_RAW;
                    }
                    else
                    {
                        AssertLogRelFailed();
                        pVCpu->em.s.enmState = EMSTATE_NONE;
                    }
                    break;

                /*
                 * Reschedule - to HM or NEM.
                 */
                case VINF_EM_RESCHEDULE_HM:
                    Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
                    Assert(!pVCpu->em.s.fForceRAW);
                    if (VM_IS_HM_ENABLED(pVM))
                    {
                        Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_HM)\n", enmOldState, EMSTATE_HM));
                        pVCpu->em.s.enmState = EMSTATE_HM;
                    }
                    else if (VM_IS_NEM_ENABLED(pVM))
                    {
                        Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HM: %d -> %d (EMSTATE_NEM)\n", enmOldState, EMSTATE_NEM));
                        pVCpu->em.s.enmState = EMSTATE_NEM;
                    }
                    else
                    {
                        AssertLogRelFailed();
                        pVCpu->em.s.enmState = EMSTATE_NONE;
                    }
                    break;

                /*
                 * Reschedule - to recompiled execution.
                 */
                case VINF_EM_RESCHEDULE_REM:
                    Assert(!pVM->em.s.fIemExecutesAll || pVCpu->em.s.enmState != EMSTATE_IEM);
                    if (!VM_IS_RAW_MODE_ENABLED(pVM))
                    {
                        Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_IEM_THEN_REM)\n",
                              enmOldState, EMSTATE_IEM_THEN_REM));
                        if (pVCpu->em.s.enmState != EMSTATE_IEM_THEN_REM)
                        {
                            pVCpu->em.s.enmState = EMSTATE_IEM_THEN_REM;
                            pVCpu->em.s.cIemThenRemInstructions = 0;
                        }
                    }
                    else
                    {
                        Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", enmOldState, EMSTATE_REM));
                        pVCpu->em.s.enmState = EMSTATE_REM;
                    }
                    break;

                /*
                 * Resume.
                 */
                case VINF_EM_RESUME:
                    Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", enmOldState));
                    /* Don't reschedule in the halted or wait for SIPI case. */
                    if (    pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
                        ||  pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
                    {
                        pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
                        break;
                    }
                    /* fall through and get scheduled. */
                    RT_FALL_THRU();

                /*
                 * Reschedule.
                 */
                case VINF_EM_RESCHEDULE:
                {
                    EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
                    Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
                    if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
                        pVCpu->em.s.cIemThenRemInstructions = 0;
                    pVCpu->em.s.enmState = enmState;
                    break;
                }

                /*
                 * Halted.
                 */
                case VINF_EM_HALT:
                    Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", enmOldState, EMSTATE_HALTED));
                    pVCpu->em.s.enmState = EMSTATE_HALTED;
                    break;

                /*
                 * Switch to the wait for SIPI state (application processor only)
                 */
                case VINF_EM_WAIT_SIPI:
                    Assert(pVCpu->idCpu != 0);
                    Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", enmOldState, EMSTATE_WAIT_SIPI));
                    pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
                    break;


                /*
                 * Suspend.
                 */
                case VINF_EM_SUSPEND:
                    Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
                    Assert(enmOldState != EMSTATE_SUSPENDED);
                    pVCpu->em.s.enmPrevState = enmOldState;
                    pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
                    break;

                /*
                 * Reset.
                 * We might end up doing a double reset for now, we'll have to clean up the mess later.
                 */
                case VINF_EM_RESET:
                {
                    if (pVCpu->idCpu == 0)
                    {
                        EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
                        Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", enmOldState, enmState, emR3GetStateName(enmState)));
                        if (pVCpu->em.s.enmState != enmState && enmState == EMSTATE_IEM_THEN_REM)
                            pVCpu->em.s.cIemThenRemInstructions = 0;
                        pVCpu->em.s.enmState = enmState;
                    }
                    else
                    {
                        /* All other VCPUs go into the wait for SIPI state. */
                        pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
                    }
                    break;
                }

                /*
                 * Power Off.
                 */
                case VINF_EM_OFF:
                    pVCpu->em.s.enmState = EMSTATE_TERMINATING;
                    Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    return rc;

                /*
                 * Terminate the VM.
                 */
                case VINF_EM_TERMINATE:
                    pVCpu->em.s.enmState = EMSTATE_TERMINATING;
                    Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", enmOldState, EMSTATE_TERMINATING));
                    if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
                        TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    return rc;


                /*
                 * Out of memory, suspend the VM and stuff.
                 */
                case VINF_EM_NO_MEMORY:
                    Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", enmOldState, EMSTATE_SUSPENDED));
                    Assert(enmOldState != EMSTATE_SUSPENDED);
                    pVCpu->em.s.enmPrevState = enmOldState;
                    pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);

                    rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
                                           N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
                    if (rc != VINF_EM_SUSPEND)
                    {
                        if (RT_SUCCESS_NP(rc))
                        {
                            AssertLogRelMsgFailed(("%Rrc\n", rc));
                            rc = VERR_EM_INTERNAL_ERROR;
                        }
                        pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
                    }
                    return rc;

                /*
                 * Guest debug events.
                 */
                case VINF_EM_DBG_STEPPED:
                case VINF_EM_DBG_STOP:
                case VINF_EM_DBG_EVENT:
                case VINF_EM_DBG_BREAKPOINT:
                case VINF_EM_DBG_STEP:
                    if (enmOldState == EMSTATE_RAW)
                    {
                        Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_RAW));
                        pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
                    }
                    else if (enmOldState == EMSTATE_HM)
                    {
                        Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_HM));
                        pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_HM;
                    }
                    else if (enmOldState == EMSTATE_NEM)
                    {
                        Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_NEM));
                        pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_NEM;
                    }
                    else if (enmOldState == EMSTATE_REM)
                    {
                        Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_REM));
                        pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
                    }
                    else
                    {
                        Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_GUEST_IEM));
                        pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_IEM;
                    }
                    break;

                /*
                 * Hypervisor debug events.
                 */
                case VINF_EM_DBG_HYPER_STEPPED:
                case VINF_EM_DBG_HYPER_BREAKPOINT:
                case VINF_EM_DBG_HYPER_ASSERTION:
                    Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, enmOldState, EMSTATE_DEBUG_HYPER));
                    pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
                    break;

                /*
                 * Triple fault.
                 */
                case VINF_EM_TRIPLE_FAULT:
                    if (!pVM->em.s.fGuruOnTripleFault)
                    {
                        Log(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: CPU reset...\n"));
                        rc = VBOXSTRICTRC_TODO(VMR3ResetTripleFault(pVM));
                        Log2(("EMR3ExecuteVM: VINF_EM_TRIPLE_FAULT: %d -> %d (rc=%Rrc)\n", enmOldState, pVCpu->em.s.enmState, rc));
                        continue;
                    }
                    /* Else fall through and trigger a guru. */
                    RT_FALL_THRU();

                case VERR_VMM_RING0_ASSERTION:
                    Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
                    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
                    break;

                /*
                 * Any error code showing up here other than the ones we
                 * know and process above are considered to be FATAL.
                 *
                 * Unknown warnings and informational status codes are also
                 * included in this.
                 */
                default:
                    if (RT_SUCCESS_NP(rc))
                    {
                        AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
                        rc = VERR_EM_INTERNAL_ERROR;
                    }
                    Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, enmOldState, EMSTATE_GURU_MEDITATION));
                    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
                    break;
            }

            /*
             * Act on state transition.
             */
            EMSTATE const enmNewState = pVCpu->em.s.enmState;
            if (enmOldState != enmNewState)
            {
                VBOXVMM_EM_STATE_CHANGED(pVCpu, enmOldState, enmNewState, rc);

                /* Clear MWait flags and the unhalt FF. */
                if (   enmOldState == EMSTATE_HALTED
                    && (   (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
                        || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
                    && (   enmNewState == EMSTATE_RAW
                        || enmNewState == EMSTATE_HM
                        || enmNewState == EMSTATE_NEM
                        || enmNewState == EMSTATE_REM
                        || enmNewState == EMSTATE_IEM_THEN_REM
                        || enmNewState == EMSTATE_DEBUG_GUEST_RAW
                        || enmNewState == EMSTATE_DEBUG_GUEST_HM
                        || enmNewState == EMSTATE_DEBUG_GUEST_NEM
                        || enmNewState == EMSTATE_DEBUG_GUEST_IEM
                        || enmNewState == EMSTATE_DEBUG_GUEST_REM) )
                {
                    if (pVCpu->em.s.MWait.fWait & EMMWAIT_FLAG_ACTIVE)
                    {
                        LogFlow(("EMR3ExecuteVM: Clearing MWAIT\n"));
                        pVCpu->em.s.MWait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
                    }
                    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_UNHALT))
                    {
                        LogFlow(("EMR3ExecuteVM: Clearing UNHALT\n"));
                        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_UNHALT);
                    }
                }
            }
            else
                VBOXVMM_EM_STATE_UNCHANGED(pVCpu, enmNewState, rc);

            STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
            STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);

            /*
             * Act on the new state.
             */
            switch (enmNewState)
            {
                /*
                 * Execute raw.
                 */
                case EMSTATE_RAW:
#ifdef VBOX_WITH_RAW_MODE
                    rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
#else
                    AssertLogRelMsgFailed(("%Rrc\n", rc));
                    rc = VERR_EM_INTERNAL_ERROR;
#endif
                    break;

                /*
                 * Execute hardware accelerated raw.
                 */
                case EMSTATE_HM:
                    rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
                    break;

                /*
                 * Execute hardware accelerated raw.
                 */
                case EMSTATE_NEM:
                    rc = VBOXSTRICTRC_TODO(emR3NemExecute(pVM, pVCpu, &fFFDone));
                    break;

                /*
                 * Execute recompiled.
                 */
                case EMSTATE_REM:
                    rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
                    Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
                    break;

                /*
                 * Execute in the interpreter.
                 */
                case EMSTATE_IEM:
                {
#if 0 /* For testing purposes. */
                    STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x1);
                    rc = VBOXSTRICTRC_TODO(EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE));
                    STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x1);
                    if (rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_RESCHEDULE_HM || rc == VINF_EM_RESCHEDULE_REM || rc == VINF_EM_RESCHEDULE_RAW)
                        rc = VINF_SUCCESS;
                    else if (rc == VERR_EM_CANNOT_EXEC_GUEST)
#endif
                        rc = VBOXSTRICTRC_TODO(IEMExecLots(pVCpu, NULL /*pcInstructions*/));
                    if (pVM->em.s.fIemExecutesAll)
                    {
                        Assert(rc != VINF_EM_RESCHEDULE_REM);
                        Assert(rc != VINF_EM_RESCHEDULE_RAW);
                        Assert(rc != VINF_EM_RESCHEDULE_HM);
                    }
                    fFFDone = false;
                    break;
                }

                /*
                 * Execute in IEM, hoping we can quickly switch back to HM
                 * or RAW execution. If our hopes fail, we go to REM.
                 */
                case EMSTATE_IEM_THEN_REM:
                {
                    STAM_PROFILE_START(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
                    rc = VBOXSTRICTRC_TODO(emR3ExecuteIemThenRem(pVM, pVCpu, &fFFDone));
                    STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMThenREM, pIemThenRem);
                    break;
                }

                /*
                 * Application processor execution halted until SIPI.
                 */
                case EMSTATE_WAIT_SIPI:
                    /* no break */
                /*
                 * hlt - execution halted until interrupt.
                 */
                case EMSTATE_HALTED:
                {
                    STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
                    /* If HM (or someone else) store a pending interrupt in
                       TRPM, it must be dispatched ASAP without any halting.
                       Anything pending in TRPM has been accepted and the CPU
                       should already be the right state to receive it. */
                    if (TRPMHasTrap(pVCpu))
                        rc = VINF_EM_RESCHEDULE;
                    /* MWAIT has a special extension where it's woken up when
                       an interrupt is pending even when IF=0. */
                    else if (   (pVCpu->em.s.MWait.fWait & (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
                             == (EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0))
                    {
                        rc = VMR3WaitHalted(pVM, pVCpu, false /*fIgnoreInterrupts*/);
                        if (rc == VINF_SUCCESS)
                        {
                            if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_UPDATE_APIC))
                                APICUpdatePendingInterrupts(pVCpu);

                            if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC
                                                         | VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
                            {
                                Log(("EMR3ExecuteVM: Triggering reschedule on pending IRQ after MWAIT\n"));
                                rc = VINF_EM_RESCHEDULE;
                            }
                        }
                    }
                    else
                    {
                        rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
                        /* We're only interested in NMI/SMIs here which have their own FFs, so we don't need to
                           check VMCPU_FF_UPDATE_APIC here. */
                        if (   rc == VINF_SUCCESS
                            && VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_INTERRUPT_NMI | VMCPU_FF_INTERRUPT_SMI | VMCPU_FF_UNHALT))
                        {
                            Log(("EMR3ExecuteVM: Triggering reschedule on pending NMI/SMI/UNHALT after HLT\n"));
                            rc = VINF_EM_RESCHEDULE;
                        }
                    }

                    STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
                    break;
                }

                /*
                 * Suspended - return to VM.cpp.
                 */
                case EMSTATE_SUSPENDED:
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return VINF_EM_SUSPEND;

                /*
                 * Debugging in the guest.
                 */
                case EMSTATE_DEBUG_GUEST_RAW:
                case EMSTATE_DEBUG_GUEST_HM:
                case EMSTATE_DEBUG_GUEST_NEM:
                case EMSTATE_DEBUG_GUEST_IEM:
                case EMSTATE_DEBUG_GUEST_REM:
                    TMR3NotifySuspend(pVM, pVCpu);
                    rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
                    TMR3NotifyResume(pVM, pVCpu);
                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
                    break;

                /*
                 * Debugging in the hypervisor.
                 */
                case EMSTATE_DEBUG_HYPER:
                {
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);

                    rc = VBOXSTRICTRC_TODO(emR3Debug(pVM, pVCpu, rc));
                    Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
                    if (rc != VINF_SUCCESS)
                    {
                        if (rc == VINF_EM_OFF || rc == VINF_EM_TERMINATE)
                            pVCpu->em.s.enmState = EMSTATE_TERMINATING;
                        else
                        {
                            /* switch to guru meditation mode */
                            pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
                            VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
                            VMMR3FatalDump(pVM, pVCpu, rc);
                        }
                        Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                        return rc;
                    }

                    STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
                    TMR3NotifyResume(pVM, pVCpu);
                    break;
                }

                /*
                 * Guru meditation takes place in the debugger.
                 */
                case EMSTATE_GURU_MEDITATION:
                {
                    TMR3NotifySuspend(pVM, pVCpu);
                    VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
                    VMMR3FatalDump(pVM, pVCpu, rc);
                    emR3Debug(pVM, pVCpu, rc);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return rc;
                }

                /*
                 * The states we don't expect here.
                 */
                case EMSTATE_NONE:
                case EMSTATE_TERMINATING:
                default:
                    AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
                    pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
                    TMR3NotifySuspend(pVM, pVCpu);
                    STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
                    Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(enmOldState)));
                    return VERR_EM_INTERNAL_ERROR;
            }
        } /* The Outer Main Loop */
    }
    else
    {
        /*
         * Fatal error.
         */
        Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
        TMR3NotifySuspend(pVM, pVCpu);
        VMR3SetGuruMeditation(pVM); /* This notifies the other EMTs. */
        VMMR3FatalDump(pVM, pVCpu, rc);
        emR3Debug(pVM, pVCpu, rc);
        STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
        /** @todo change the VM state! */
        return rc;
    }

    /* not reached */
}
3091
3092/**
3093 * Notify EM of a state change (used by FTM)
3094 *
3095 * @param pVM The cross context VM structure.
3096 */
3097VMMR3_INT_DECL(int) EMR3NotifySuspend(PVM pVM)
3098{
3099 PVMCPU pVCpu = VMMGetCpu(pVM);
3100
3101 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
3102 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
3103 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
3104 return VINF_SUCCESS;
3105}
3106
3107/**
3108 * Notify EM of a state change (used by FTM)
3109 *
3110 * @param pVM The cross context VM structure.
3111 */
3112VMMR3_INT_DECL(int) EMR3NotifyResume(PVM pVM)
3113{
3114 PVMCPU pVCpu = VMMGetCpu(pVM);
3115 EMSTATE enmCurState = pVCpu->em.s.enmState;
3116
3117 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
3118 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
3119 pVCpu->em.s.enmPrevState = enmCurState;
3120 return VINF_SUCCESS;
3121}
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette