VirtualBox

source: vbox/trunk/src/VBox/VMM/EM.cpp @ 34904

Last change on this file since 34904 was 34326, checked in by vboxsync, 14 years ago

VMM: Removed the XXXInitCPU and XXXTermCPU methods since all but the HWACCM ones were stubs and the XXXTermCPU bits were not called in all expected paths. The HWACCMR3InitCPU was hooked up as a VMINITCOMPLETED_RING3 hook, essentially leaving its position in the order of things unchanged, while the HWACCMR3TermCPU call was made static without changing its position at the end of HWACCMR3Term.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 100.9 KB
1/* $Id: EM.cpp 34326 2010-11-24 14:03:55Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HwAccExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hwaccm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
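/*
 * Editorial sketch, not part of the original file: the outer/inner loop
 * split described above, in minimal form. EMR3ExecuteVM() dispatches on
 * the per-VCPU EM state and each inner loop runs until it wants to be
 * rescheduled; the state and function names below are the ones this file
 * uses, but the loop body is heavily simplified.
 *
 *     for (;;)
 *     {
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_RAW:   rc = emR3RawExecute(pVM, pVCpu, &fFFDone);   break;
 *             case EMSTATE_HWACC: rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_REM:   rc = emR3RemExecute(pVM, pVCpu, &fFFDone);   break;
 *             default:            break; // halted, suspended, debug, guru, ...
 *         }
 *         pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
 *     }
 */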
33
34/*******************************************************************************
35* Header Files *
36*******************************************************************************/
37#define LOG_GROUP LOG_GROUP_EM
38#include <VBox/em.h>
39#include <VBox/vmm.h>
40#include <VBox/patm.h>
41#include <VBox/csam.h>
42#include <VBox/selm.h>
43#include <VBox/trpm.h>
44#include <VBox/iom.h>
45#include <VBox/dbgf.h>
46#include <VBox/pgm.h>
47#include <VBox/rem.h>
48#include <VBox/tm.h>
49#include <VBox/mm.h>
50#include <VBox/ssm.h>
51#include <VBox/pdmapi.h>
52#include <VBox/pdmcritsect.h>
53#include <VBox/pdmqueue.h>
54#include <VBox/hwaccm.h>
55#include <VBox/patm.h>
56#include "EMInternal.h"
57#include "include/internal/em.h"
58#include <VBox/vm.h>
59#include <VBox/cpumdis.h>
60#include <VBox/dis.h>
61#include <VBox/disopcode.h>
62#include <VBox/dbgf.h>
63
64#include <iprt/asm.h>
65#include <iprt/string.h>
66#include <iprt/stream.h>
67#include <iprt/thread.h>
68
69
70/*******************************************************************************
71* Defined Constants And Macros *
72*******************************************************************************/
73#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
74#define EM_NOTIFY_HWACCM
75#endif
76
77
78/*******************************************************************************
79* Internal Functions *
80*******************************************************************************/
81static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
82static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass);
83static const char *emR3GetStateName(EMSTATE enmState);
84static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc);
85static int emR3RemStep(PVM pVM, PVMCPU pVCpu);
86static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone);
87int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc);
88
89
90/**
91 * Initializes the EM.
92 *
93 * @returns VBox status code.
94 * @param pVM The VM to operate on.
95 */
96VMMR3DECL(int) EMR3Init(PVM pVM)
97{
98 LogFlow(("EMR3Init\n"));
99 /*
100 * Assert alignment and sizes.
101 */
102 AssertCompileMemberAlignment(VM, em.s, 32);
103 AssertCompile(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
104 AssertCompile(sizeof(pVM->aCpus[0].em.s.u.FatalLongJump) <= sizeof(pVM->aCpus[0].em.s.u.achPaddingFatalLongJump));
105 AssertCompileMemberAlignment(EM, CritSectREM, sizeof(uintptr_t));
106
107 /*
108 * Init the structure.
109 */
110 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
111 int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR3Enabled", &pVM->fRawR3Enabled);
112 if (RT_FAILURE(rc))
113 pVM->fRawR3Enabled = true;
114 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR0Enabled", &pVM->fRawR0Enabled);
115 if (RT_FAILURE(rc))
116 pVM->fRawR0Enabled = true;
117 Log(("EMR3Init: fRawR3Enabled=%d fRawR0Enabled=%d\n", pVM->fRawR3Enabled, pVM->fRawR0Enabled));
118
119 /*
120 * Initialize the REM critical section.
121 */
122 rc = PDMR3CritSectInit(pVM, &pVM->em.s.CritSectREM, RT_SRC_POS, "EM-REM");
123 AssertRCReturn(rc, rc);
124
125 /*
126 * Saved state.
127 */
128 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
129 NULL, NULL, NULL,
130 NULL, emR3Save, NULL,
131 NULL, emR3Load, NULL);
132 if (RT_FAILURE(rc))
133 return rc;
134
135 for (VMCPUID i = 0; i < pVM->cCpus; i++)
136 {
137 PVMCPU pVCpu = &pVM->aCpus[i];
138
139 pVCpu->em.s.offVMCPU = RT_OFFSETOF(VMCPU, em.s);
140
141 pVCpu->em.s.enmState = (i == 0) ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
142 pVCpu->em.s.enmPrevState = EMSTATE_NONE;
143 pVCpu->em.s.fForceRAW = false;
144
145 pVCpu->em.s.pCtx = CPUMQueryGuestCtxPtr(pVCpu);
146 pVCpu->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
147 AssertMsg(pVCpu->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
148
149 /* Force reset of the time slice. */
150 pVCpu->em.s.u64TimeSliceStart = 0;
151
152# define EM_REG_COUNTER(a, b, c) \
153 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES, c, b, i); \
154 AssertRC(rc);
155
156# define EM_REG_COUNTER_USED(a, b, c) \
157 rc = STAMR3RegisterF(pVM, a, STAMTYPE_COUNTER, STAMVISIBILITY_USED, STAMUNIT_OCCURENCES, c, b, i); \
158 AssertRC(rc);
159
160# define EM_REG_PROFILE(a, b, c) \
161 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
162 AssertRC(rc);
163
164# define EM_REG_PROFILE_ADV(a, b, c) \
165 rc = STAMR3RegisterF(pVM, a, STAMTYPE_PROFILE_ADV, STAMVISIBILITY_ALWAYS, STAMUNIT_TICKS_PER_CALL, c, b, i); \
166 AssertRC(rc);
167
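/* Illustrative expansion (editorial note): these EM_REG_* wrappers simply
 * forward to STAMR3RegisterF, with the trailing 'i' argument filling in the
 * "CPU%d" placeholder so each VCPU gets its own node in the statistics
 * tree. The StatR3Hlt registration below, for instance, expands to:
 *
 *     rc = STAMR3RegisterF(pVM, &pStats->StatR3Hlt, STAMTYPE_COUNTER,
 *                          STAMVISIBILITY_USED, STAMUNIT_OCCURENCES,
 *                          "The number of times HLT was successfully interpreted.",
 *                          "/EM/CPU%d/R3/Interpret/Success/Hlt", i);
 *     AssertRC(rc);
 */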
168 /*
169 * Statistics.
170 */
171#ifdef VBOX_WITH_STATISTICS
172 PEMSTATS pStats;
173 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
174 if (RT_FAILURE(rc))
175 return rc;
176
177 pVCpu->em.s.pStatsR3 = pStats;
178 pVCpu->em.s.pStatsR0 = MMHyperR3ToR0(pVM, pStats);
179 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pStats);
180
181 EM_REG_PROFILE(&pStats->StatRZEmulate, "/EM/CPU%d/RZ/Interpret", "Profiling of EMInterpretInstruction.");
182 EM_REG_PROFILE(&pStats->StatR3Emulate, "/EM/CPU%d/R3/Interpret", "Profiling of EMInterpretInstruction.");
183
184 EM_REG_PROFILE(&pStats->StatRZInterpretSucceeded, "/EM/CPU%d/RZ/Interpret/Success", "The number of times an instruction was successfully interpreted.");
185 EM_REG_PROFILE(&pStats->StatR3InterpretSucceeded, "/EM/CPU%d/R3/Interpret/Success", "The number of times an instruction was successfully interpreted.");
186
187 EM_REG_COUNTER_USED(&pStats->StatRZAnd, "/EM/CPU%d/RZ/Interpret/Success/And", "The number of times AND was successfully interpreted.");
188 EM_REG_COUNTER_USED(&pStats->StatR3And, "/EM/CPU%d/R3/Interpret/Success/And", "The number of times AND was successfully interpreted.");
189 EM_REG_COUNTER_USED(&pStats->StatRZAdd, "/EM/CPU%d/RZ/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
190 EM_REG_COUNTER_USED(&pStats->StatR3Add, "/EM/CPU%d/R3/Interpret/Success/Add", "The number of times ADD was successfully interpreted.");
191 EM_REG_COUNTER_USED(&pStats->StatRZAdc, "/EM/CPU%d/RZ/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
192 EM_REG_COUNTER_USED(&pStats->StatR3Adc, "/EM/CPU%d/R3/Interpret/Success/Adc", "The number of times ADC was successfully interpreted.");
193 EM_REG_COUNTER_USED(&pStats->StatRZSub, "/EM/CPU%d/RZ/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
194 EM_REG_COUNTER_USED(&pStats->StatR3Sub, "/EM/CPU%d/R3/Interpret/Success/Sub", "The number of times SUB was successfully interpreted.");
195 EM_REG_COUNTER_USED(&pStats->StatRZCpuId, "/EM/CPU%d/RZ/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
196 EM_REG_COUNTER_USED(&pStats->StatR3CpuId, "/EM/CPU%d/R3/Interpret/Success/CpuId", "The number of times CPUID was successfully interpreted.");
197 EM_REG_COUNTER_USED(&pStats->StatRZDec, "/EM/CPU%d/RZ/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
198 EM_REG_COUNTER_USED(&pStats->StatR3Dec, "/EM/CPU%d/R3/Interpret/Success/Dec", "The number of times DEC was successfully interpreted.");
199 EM_REG_COUNTER_USED(&pStats->StatRZHlt, "/EM/CPU%d/RZ/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
200 EM_REG_COUNTER_USED(&pStats->StatR3Hlt, "/EM/CPU%d/R3/Interpret/Success/Hlt", "The number of times HLT was successfully interpreted.");
201 EM_REG_COUNTER_USED(&pStats->StatRZInc, "/EM/CPU%d/RZ/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
202 EM_REG_COUNTER_USED(&pStats->StatR3Inc, "/EM/CPU%d/R3/Interpret/Success/Inc", "The number of times INC was successfully interpreted.");
203 EM_REG_COUNTER_USED(&pStats->StatRZInvlPg, "/EM/CPU%d/RZ/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
204 EM_REG_COUNTER_USED(&pStats->StatR3InvlPg, "/EM/CPU%d/R3/Interpret/Success/Invlpg", "The number of times INVLPG was successfully interpreted.");
205 EM_REG_COUNTER_USED(&pStats->StatRZIret, "/EM/CPU%d/RZ/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
206 EM_REG_COUNTER_USED(&pStats->StatR3Iret, "/EM/CPU%d/R3/Interpret/Success/Iret", "The number of times IRET was successfully interpreted.");
207 EM_REG_COUNTER_USED(&pStats->StatRZLLdt, "/EM/CPU%d/RZ/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
208 EM_REG_COUNTER_USED(&pStats->StatR3LLdt, "/EM/CPU%d/R3/Interpret/Success/LLdt", "The number of times LLDT was successfully interpreted.");
209 EM_REG_COUNTER_USED(&pStats->StatRZLIdt, "/EM/CPU%d/RZ/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
210 EM_REG_COUNTER_USED(&pStats->StatR3LIdt, "/EM/CPU%d/R3/Interpret/Success/LIdt", "The number of times LIDT was successfully interpreted.");
211 EM_REG_COUNTER_USED(&pStats->StatRZLGdt, "/EM/CPU%d/RZ/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
212 EM_REG_COUNTER_USED(&pStats->StatR3LGdt, "/EM/CPU%d/R3/Interpret/Success/LGdt", "The number of times LGDT was successfully interpreted.");
213 EM_REG_COUNTER_USED(&pStats->StatRZMov, "/EM/CPU%d/RZ/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
214 EM_REG_COUNTER_USED(&pStats->StatR3Mov, "/EM/CPU%d/R3/Interpret/Success/Mov", "The number of times MOV was successfully interpreted.");
215 EM_REG_COUNTER_USED(&pStats->StatRZMovCRx, "/EM/CPU%d/RZ/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
216 EM_REG_COUNTER_USED(&pStats->StatR3MovCRx, "/EM/CPU%d/R3/Interpret/Success/MovCRx", "The number of times MOV CRx was successfully interpreted.");
217 EM_REG_COUNTER_USED(&pStats->StatRZMovDRx, "/EM/CPU%d/RZ/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
218 EM_REG_COUNTER_USED(&pStats->StatR3MovDRx, "/EM/CPU%d/R3/Interpret/Success/MovDRx", "The number of times MOV DRx was successfully interpreted.");
219 EM_REG_COUNTER_USED(&pStats->StatRZOr, "/EM/CPU%d/RZ/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
220 EM_REG_COUNTER_USED(&pStats->StatR3Or, "/EM/CPU%d/R3/Interpret/Success/Or", "The number of times OR was successfully interpreted.");
221 EM_REG_COUNTER_USED(&pStats->StatRZPop, "/EM/CPU%d/RZ/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
222 EM_REG_COUNTER_USED(&pStats->StatR3Pop, "/EM/CPU%d/R3/Interpret/Success/Pop", "The number of times POP was successfully interpreted.");
223 EM_REG_COUNTER_USED(&pStats->StatRZRdtsc, "/EM/CPU%d/RZ/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
224 EM_REG_COUNTER_USED(&pStats->StatR3Rdtsc, "/EM/CPU%d/R3/Interpret/Success/Rdtsc", "The number of times RDTSC was successfully interpreted.");
225 EM_REG_COUNTER_USED(&pStats->StatRZRdpmc, "/EM/CPU%d/RZ/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
226 EM_REG_COUNTER_USED(&pStats->StatR3Rdpmc, "/EM/CPU%d/R3/Interpret/Success/Rdpmc", "The number of times RDPMC was successfully interpreted.");
227 EM_REG_COUNTER_USED(&pStats->StatRZSti, "/EM/CPU%d/RZ/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
228 EM_REG_COUNTER_USED(&pStats->StatR3Sti, "/EM/CPU%d/R3/Interpret/Success/Sti", "The number of times STI was successfully interpreted.");
229 EM_REG_COUNTER_USED(&pStats->StatRZXchg, "/EM/CPU%d/RZ/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
230 EM_REG_COUNTER_USED(&pStats->StatR3Xchg, "/EM/CPU%d/R3/Interpret/Success/Xchg", "The number of times XCHG was successfully interpreted.");
231 EM_REG_COUNTER_USED(&pStats->StatRZXor, "/EM/CPU%d/RZ/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
232 EM_REG_COUNTER_USED(&pStats->StatR3Xor, "/EM/CPU%d/R3/Interpret/Success/Xor", "The number of times XOR was successfully interpreted.");
233 EM_REG_COUNTER_USED(&pStats->StatRZMonitor, "/EM/CPU%d/RZ/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
234 EM_REG_COUNTER_USED(&pStats->StatR3Monitor, "/EM/CPU%d/R3/Interpret/Success/Monitor", "The number of times MONITOR was successfully interpreted.");
235 EM_REG_COUNTER_USED(&pStats->StatRZMWait, "/EM/CPU%d/RZ/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
236 EM_REG_COUNTER_USED(&pStats->StatR3MWait, "/EM/CPU%d/R3/Interpret/Success/MWait", "The number of times MWAIT was successfully interpreted.");
237 EM_REG_COUNTER_USED(&pStats->StatRZBtr, "/EM/CPU%d/RZ/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
238 EM_REG_COUNTER_USED(&pStats->StatR3Btr, "/EM/CPU%d/R3/Interpret/Success/Btr", "The number of times BTR was successfully interpreted.");
239 EM_REG_COUNTER_USED(&pStats->StatRZBts, "/EM/CPU%d/RZ/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
240 EM_REG_COUNTER_USED(&pStats->StatR3Bts, "/EM/CPU%d/R3/Interpret/Success/Bts", "The number of times BTS was successfully interpreted.");
241 EM_REG_COUNTER_USED(&pStats->StatRZBtc, "/EM/CPU%d/RZ/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
242 EM_REG_COUNTER_USED(&pStats->StatR3Btc, "/EM/CPU%d/R3/Interpret/Success/Btc", "The number of times BTC was successfully interpreted.");
243 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
244 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg, "/EM/CPU%d/R3/Interpret/Success/CmpXchg", "The number of times CMPXCHG was successfully interpreted.");
245 EM_REG_COUNTER_USED(&pStats->StatRZCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
246 EM_REG_COUNTER_USED(&pStats->StatR3CmpXchg8b, "/EM/CPU%d/R3/Interpret/Success/CmpXchg8b", "The number of times CMPXCHG8B was successfully interpreted.");
247 EM_REG_COUNTER_USED(&pStats->StatRZXAdd, "/EM/CPU%d/RZ/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
248 EM_REG_COUNTER_USED(&pStats->StatR3XAdd, "/EM/CPU%d/R3/Interpret/Success/XAdd", "The number of times XADD was successfully interpreted.");
249 EM_REG_COUNTER_USED(&pStats->StatR3Rdmsr, "/EM/CPU%d/R3/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
250 EM_REG_COUNTER_USED(&pStats->StatRZRdmsr, "/EM/CPU%d/RZ/Interpret/Success/Rdmsr", "The number of times RDMSR was successfully interpreted.");
251 EM_REG_COUNTER_USED(&pStats->StatR3Wrmsr, "/EM/CPU%d/R3/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
252 EM_REG_COUNTER_USED(&pStats->StatRZWrmsr, "/EM/CPU%d/RZ/Interpret/Success/Wrmsr", "The number of times WRMSR was successfully interpreted.");
253 EM_REG_COUNTER_USED(&pStats->StatR3StosWD, "/EM/CPU%d/R3/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
254 EM_REG_COUNTER_USED(&pStats->StatRZStosWD, "/EM/CPU%d/RZ/Interpret/Success/Stoswd", "The number of times STOSWD was successfully interpreted.");
255 EM_REG_COUNTER_USED(&pStats->StatRZWbInvd, "/EM/CPU%d/RZ/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
256 EM_REG_COUNTER_USED(&pStats->StatR3WbInvd, "/EM/CPU%d/R3/Interpret/Success/WbInvd", "The number of times WBINVD was successfully interpreted.");
257 EM_REG_COUNTER_USED(&pStats->StatRZLmsw, "/EM/CPU%d/RZ/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
258 EM_REG_COUNTER_USED(&pStats->StatR3Lmsw, "/EM/CPU%d/R3/Interpret/Success/Lmsw", "The number of times LMSW was successfully interpreted.");
259
260 EM_REG_COUNTER(&pStats->StatRZInterpretFailed, "/EM/CPU%d/RZ/Interpret/Failed", "The number of times an instruction was not interpreted.");
261 EM_REG_COUNTER(&pStats->StatR3InterpretFailed, "/EM/CPU%d/R3/Interpret/Failed", "The number of times an instruction was not interpreted.");
262
263 EM_REG_COUNTER_USED(&pStats->StatRZFailedAnd, "/EM/CPU%d/RZ/Interpret/Failed/And", "The number of times AND was not interpreted.");
264 EM_REG_COUNTER_USED(&pStats->StatR3FailedAnd, "/EM/CPU%d/R3/Interpret/Failed/And", "The number of times AND was not interpreted.");
265 EM_REG_COUNTER_USED(&pStats->StatRZFailedCpuId, "/EM/CPU%d/RZ/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
266 EM_REG_COUNTER_USED(&pStats->StatR3FailedCpuId, "/EM/CPU%d/R3/Interpret/Failed/CpuId", "The number of times CPUID was not interpreted.");
267 EM_REG_COUNTER_USED(&pStats->StatRZFailedDec, "/EM/CPU%d/RZ/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
268 EM_REG_COUNTER_USED(&pStats->StatR3FailedDec, "/EM/CPU%d/R3/Interpret/Failed/Dec", "The number of times DEC was not interpreted.");
269 EM_REG_COUNTER_USED(&pStats->StatRZFailedHlt, "/EM/CPU%d/RZ/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
270 EM_REG_COUNTER_USED(&pStats->StatR3FailedHlt, "/EM/CPU%d/R3/Interpret/Failed/Hlt", "The number of times HLT was not interpreted.");
271 EM_REG_COUNTER_USED(&pStats->StatRZFailedInc, "/EM/CPU%d/RZ/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
272 EM_REG_COUNTER_USED(&pStats->StatR3FailedInc, "/EM/CPU%d/R3/Interpret/Failed/Inc", "The number of times INC was not interpreted.");
273 EM_REG_COUNTER_USED(&pStats->StatRZFailedInvlPg, "/EM/CPU%d/RZ/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
274 EM_REG_COUNTER_USED(&pStats->StatR3FailedInvlPg, "/EM/CPU%d/R3/Interpret/Failed/InvlPg", "The number of times INVLPG was not interpreted.");
275 EM_REG_COUNTER_USED(&pStats->StatRZFailedIret, "/EM/CPU%d/RZ/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
276 EM_REG_COUNTER_USED(&pStats->StatR3FailedIret, "/EM/CPU%d/R3/Interpret/Failed/Iret", "The number of times IRET was not interpreted.");
277 EM_REG_COUNTER_USED(&pStats->StatRZFailedLLdt, "/EM/CPU%d/RZ/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
278 EM_REG_COUNTER_USED(&pStats->StatR3FailedLLdt, "/EM/CPU%d/R3/Interpret/Failed/LLdt", "The number of times LLDT was not interpreted.");
279 EM_REG_COUNTER_USED(&pStats->StatRZFailedLIdt, "/EM/CPU%d/RZ/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
280 EM_REG_COUNTER_USED(&pStats->StatR3FailedLIdt, "/EM/CPU%d/R3/Interpret/Failed/LIdt", "The number of times LIDT was not interpreted.");
281 EM_REG_COUNTER_USED(&pStats->StatRZFailedLGdt, "/EM/CPU%d/RZ/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
282 EM_REG_COUNTER_USED(&pStats->StatR3FailedLGdt, "/EM/CPU%d/R3/Interpret/Failed/LGdt", "The number of times LGDT was not interpreted.");
283 EM_REG_COUNTER_USED(&pStats->StatRZFailedMov, "/EM/CPU%d/RZ/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
284 EM_REG_COUNTER_USED(&pStats->StatR3FailedMov, "/EM/CPU%d/R3/Interpret/Failed/Mov", "The number of times MOV was not interpreted.");
285 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovCRx, "/EM/CPU%d/RZ/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
286 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovCRx, "/EM/CPU%d/R3/Interpret/Failed/MovCRx", "The number of times MOV CRx was not interpreted.");
287 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovDRx, "/EM/CPU%d/RZ/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
288 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovDRx, "/EM/CPU%d/R3/Interpret/Failed/MovDRx", "The number of times MOV DRx was not interpreted.");
289 EM_REG_COUNTER_USED(&pStats->StatRZFailedOr, "/EM/CPU%d/RZ/Interpret/Failed/Or", "The number of times OR was not interpreted.");
290 EM_REG_COUNTER_USED(&pStats->StatR3FailedOr, "/EM/CPU%d/R3/Interpret/Failed/Or", "The number of times OR was not interpreted.");
291 EM_REG_COUNTER_USED(&pStats->StatRZFailedPop, "/EM/CPU%d/RZ/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
292 EM_REG_COUNTER_USED(&pStats->StatR3FailedPop, "/EM/CPU%d/R3/Interpret/Failed/Pop", "The number of times POP was not interpreted.");
293 EM_REG_COUNTER_USED(&pStats->StatRZFailedSti, "/EM/CPU%d/RZ/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
294 EM_REG_COUNTER_USED(&pStats->StatR3FailedSti, "/EM/CPU%d/R3/Interpret/Failed/Sti", "The number of times STI was not interpreted.");
295 EM_REG_COUNTER_USED(&pStats->StatRZFailedXchg, "/EM/CPU%d/RZ/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
296 EM_REG_COUNTER_USED(&pStats->StatR3FailedXchg, "/EM/CPU%d/R3/Interpret/Failed/Xchg", "The number of times XCHG was not interpreted.");
297 EM_REG_COUNTER_USED(&pStats->StatRZFailedXor, "/EM/CPU%d/RZ/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
298 EM_REG_COUNTER_USED(&pStats->StatR3FailedXor, "/EM/CPU%d/R3/Interpret/Failed/Xor", "The number of times XOR was not interpreted.");
299 EM_REG_COUNTER_USED(&pStats->StatRZFailedMonitor, "/EM/CPU%d/RZ/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
300 EM_REG_COUNTER_USED(&pStats->StatR3FailedMonitor, "/EM/CPU%d/R3/Interpret/Failed/Monitor", "The number of times MONITOR was not interpreted.");
301 EM_REG_COUNTER_USED(&pStats->StatRZFailedMWait, "/EM/CPU%d/RZ/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
302 EM_REG_COUNTER_USED(&pStats->StatR3FailedMWait, "/EM/CPU%d/R3/Interpret/Failed/MWait", "The number of times MWAIT was not interpreted.");
303 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdtsc, "/EM/CPU%d/RZ/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
304 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdtsc, "/EM/CPU%d/R3/Interpret/Failed/Rdtsc", "The number of times RDTSC was not interpreted.");
305 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdpmc, "/EM/CPU%d/RZ/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
306 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdpmc, "/EM/CPU%d/R3/Interpret/Failed/Rdpmc", "The number of times RDPMC was not interpreted.");
307 EM_REG_COUNTER_USED(&pStats->StatRZFailedRdmsr, "/EM/CPU%d/RZ/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
308 EM_REG_COUNTER_USED(&pStats->StatR3FailedRdmsr, "/EM/CPU%d/R3/Interpret/Failed/Rdmsr", "The number of times RDMSR was not interpreted.");
309 EM_REG_COUNTER_USED(&pStats->StatRZFailedWrmsr, "/EM/CPU%d/RZ/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
310 EM_REG_COUNTER_USED(&pStats->StatR3FailedWrmsr, "/EM/CPU%d/R3/Interpret/Failed/Wrmsr", "The number of times WRMSR was not interpreted.");
311 EM_REG_COUNTER_USED(&pStats->StatRZFailedLmsw, "/EM/CPU%d/RZ/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
312 EM_REG_COUNTER_USED(&pStats->StatR3FailedLmsw, "/EM/CPU%d/R3/Interpret/Failed/Lmsw", "The number of times LMSW was not interpreted.");
313
314 EM_REG_COUNTER_USED(&pStats->StatRZFailedMisc, "/EM/CPU%d/RZ/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
315 EM_REG_COUNTER_USED(&pStats->StatR3FailedMisc, "/EM/CPU%d/R3/Interpret/Failed/Misc", "The number of times some misc instruction was encountered.");
316 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdd, "/EM/CPU%d/RZ/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
317 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdd, "/EM/CPU%d/R3/Interpret/Failed/Add", "The number of times ADD was not interpreted.");
318 EM_REG_COUNTER_USED(&pStats->StatRZFailedAdc, "/EM/CPU%d/RZ/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
319 EM_REG_COUNTER_USED(&pStats->StatR3FailedAdc, "/EM/CPU%d/R3/Interpret/Failed/Adc", "The number of times ADC was not interpreted.");
320 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtr, "/EM/CPU%d/RZ/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
321 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtr, "/EM/CPU%d/R3/Interpret/Failed/Btr", "The number of times BTR was not interpreted.");
322 EM_REG_COUNTER_USED(&pStats->StatRZFailedBts, "/EM/CPU%d/RZ/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
323 EM_REG_COUNTER_USED(&pStats->StatR3FailedBts, "/EM/CPU%d/R3/Interpret/Failed/Bts", "The number of times BTS was not interpreted.");
324 EM_REG_COUNTER_USED(&pStats->StatRZFailedBtc, "/EM/CPU%d/RZ/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
325 EM_REG_COUNTER_USED(&pStats->StatR3FailedBtc, "/EM/CPU%d/R3/Interpret/Failed/Btc", "The number of times BTC was not interpreted.");
326 EM_REG_COUNTER_USED(&pStats->StatRZFailedCli, "/EM/CPU%d/RZ/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
327 EM_REG_COUNTER_USED(&pStats->StatR3FailedCli, "/EM/CPU%d/R3/Interpret/Failed/Cli", "The number of times CLI was not interpreted.");
328 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
329 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg", "The number of times CMPXCHG was not interpreted.");
330 EM_REG_COUNTER_USED(&pStats->StatRZFailedCmpXchg8b, "/EM/CPU%d/RZ/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
331 EM_REG_COUNTER_USED(&pStats->StatR3FailedCmpXchg8b, "/EM/CPU%d/R3/Interpret/Failed/CmpXchg8b", "The number of times CMPXCHG8B was not interpreted.");
332 EM_REG_COUNTER_USED(&pStats->StatRZFailedXAdd, "/EM/CPU%d/RZ/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
333 EM_REG_COUNTER_USED(&pStats->StatR3FailedXAdd, "/EM/CPU%d/R3/Interpret/Failed/XAdd", "The number of times XADD was not interpreted.");
334 EM_REG_COUNTER_USED(&pStats->StatRZFailedMovNTPS, "/EM/CPU%d/RZ/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
335 EM_REG_COUNTER_USED(&pStats->StatR3FailedMovNTPS, "/EM/CPU%d/R3/Interpret/Failed/MovNTPS", "The number of times MOVNTPS was not interpreted.");
336 EM_REG_COUNTER_USED(&pStats->StatRZFailedStosWD, "/EM/CPU%d/RZ/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
337 EM_REG_COUNTER_USED(&pStats->StatR3FailedStosWD, "/EM/CPU%d/R3/Interpret/Failed/StosWD", "The number of times STOSWD was not interpreted.");
338 EM_REG_COUNTER_USED(&pStats->StatRZFailedSub, "/EM/CPU%d/RZ/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
339 EM_REG_COUNTER_USED(&pStats->StatR3FailedSub, "/EM/CPU%d/R3/Interpret/Failed/Sub", "The number of times SUB was not interpreted.");
340 EM_REG_COUNTER_USED(&pStats->StatRZFailedWbInvd, "/EM/CPU%d/RZ/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
341 EM_REG_COUNTER_USED(&pStats->StatR3FailedWbInvd, "/EM/CPU%d/R3/Interpret/Failed/WbInvd", "The number of times WBINVD was not interpreted.");
342
343 EM_REG_COUNTER_USED(&pStats->StatRZFailedUserMode, "/EM/CPU%d/RZ/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
344 EM_REG_COUNTER_USED(&pStats->StatR3FailedUserMode, "/EM/CPU%d/R3/Interpret/Failed/UserMode", "The number of rejections because of CPL.");
345 EM_REG_COUNTER_USED(&pStats->StatRZFailedPrefix, "/EM/CPU%d/RZ/Interpret/Failed/Prefix", "The number of rejections because of prefix.");
346 EM_REG_COUNTER_USED(&pStats->StatR3FailedPrefix, "/EM/CPU%d/R3/Interpret/Failed/Prefix", "The number of rejections because of prefix.");
347
348 EM_REG_COUNTER_USED(&pStats->StatCli, "/EM/CPU%d/R3/PrivInst/Cli", "Number of cli instructions.");
349 EM_REG_COUNTER_USED(&pStats->StatSti, "/EM/CPU%d/R3/PrivInst/Sti", "Number of sti instructions.");
350 EM_REG_COUNTER_USED(&pStats->StatIn, "/EM/CPU%d/R3/PrivInst/In", "Number of in instructions.");
351 EM_REG_COUNTER_USED(&pStats->StatOut, "/EM/CPU%d/R3/PrivInst/Out", "Number of out instructions.");
352 EM_REG_COUNTER_USED(&pStats->StatIoRestarted, "/EM/CPU%d/R3/PrivInst/IoRestarted", "Number of restarted i/o instructions.");
353 EM_REG_COUNTER_USED(&pStats->StatHlt, "/EM/CPU%d/R3/PrivInst/Hlt", "Number of hlt instructions not handled in GC because of PATM.");
354 EM_REG_COUNTER_USED(&pStats->StatInvlpg, "/EM/CPU%d/R3/PrivInst/Invlpg", "Number of invlpg instructions.");
355 EM_REG_COUNTER_USED(&pStats->StatMisc, "/EM/CPU%d/R3/PrivInst/Misc", "Number of misc. instructions.");
356 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[0], "/EM/CPU%d/R3/PrivInst/Mov CR0, X", "Number of mov CR0 write instructions.");
357 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[1], "/EM/CPU%d/R3/PrivInst/Mov CR1, X", "Number of mov CR1 write instructions.");
358 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[2], "/EM/CPU%d/R3/PrivInst/Mov CR2, X", "Number of mov CR2 write instructions.");
359 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[3], "/EM/CPU%d/R3/PrivInst/Mov CR3, X", "Number of mov CR3 write instructions.");
360 EM_REG_COUNTER_USED(&pStats->StatMovWriteCR[4], "/EM/CPU%d/R3/PrivInst/Mov CR4, X", "Number of mov CR4 write instructions.");
361 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[0], "/EM/CPU%d/R3/PrivInst/Mov X, CR0", "Number of mov CR0 read instructions.");
362 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[1], "/EM/CPU%d/R3/PrivInst/Mov X, CR1", "Number of mov CR1 read instructions.");
363 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[2], "/EM/CPU%d/R3/PrivInst/Mov X, CR2", "Number of mov CR2 read instructions.");
364 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[3], "/EM/CPU%d/R3/PrivInst/Mov X, CR3", "Number of mov CR3 read instructions.");
365 EM_REG_COUNTER_USED(&pStats->StatMovReadCR[4], "/EM/CPU%d/R3/PrivInst/Mov X, CR4", "Number of mov CR4 read instructions.");
366 EM_REG_COUNTER_USED(&pStats->StatMovDRx, "/EM/CPU%d/R3/PrivInst/MovDRx", "Number of mov DRx instructions.");
367 EM_REG_COUNTER_USED(&pStats->StatIret, "/EM/CPU%d/R3/PrivInst/Iret", "Number of iret instructions.");
368 EM_REG_COUNTER_USED(&pStats->StatMovLgdt, "/EM/CPU%d/R3/PrivInst/Lgdt", "Number of lgdt instructions.");
369 EM_REG_COUNTER_USED(&pStats->StatMovLidt, "/EM/CPU%d/R3/PrivInst/Lidt", "Number of lidt instructions.");
370 EM_REG_COUNTER_USED(&pStats->StatMovLldt, "/EM/CPU%d/R3/PrivInst/Lldt", "Number of lldt instructions.");
371 EM_REG_COUNTER_USED(&pStats->StatSysEnter, "/EM/CPU%d/R3/PrivInst/Sysenter", "Number of sysenter instructions.");
372 EM_REG_COUNTER_USED(&pStats->StatSysExit, "/EM/CPU%d/R3/PrivInst/Sysexit", "Number of sysexit instructions.");
373 EM_REG_COUNTER_USED(&pStats->StatSysCall, "/EM/CPU%d/R3/PrivInst/Syscall", "Number of syscall instructions.");
374 EM_REG_COUNTER_USED(&pStats->StatSysRet, "/EM/CPU%d/R3/PrivInst/Sysret", "Number of sysret instructions.");
375
376 EM_REG_COUNTER(&pVCpu->em.s.StatTotalClis, "/EM/CPU%d/Cli/Total", "Total number of cli instructions executed.");
377 pVCpu->em.s.pCliStatTree = 0;
378
379 /* these should be considered for release statistics. */
380 EM_REG_COUNTER(&pVCpu->em.s.StatIOEmu, "/PROF/CPU%d/EM/Emulation/IO", "Profiling of emR3RawExecuteIOInstruction.");
381 EM_REG_COUNTER(&pVCpu->em.s.StatPrivEmu, "/PROF/CPU%d/EM/Emulation/Priv", "Profiling of emR3RawPrivileged.");
382 EM_REG_PROFILE(&pVCpu->em.s.StatHwAccEntry, "/PROF/CPU%d/EM/HwAccEnter", "Profiling Hardware Accelerated Mode entry overhead.");
383 EM_REG_PROFILE(&pVCpu->em.s.StatHwAccExec, "/PROF/CPU%d/EM/HwAccExec", "Profiling Hardware Accelerated Mode execution.");
384 EM_REG_PROFILE(&pVCpu->em.s.StatREMEmu, "/PROF/CPU%d/EM/REMEmuSingle", "Profiling single instruction REM execution.");
385 EM_REG_PROFILE(&pVCpu->em.s.StatREMExec, "/PROF/CPU%d/EM/REMExec", "Profiling REM execution.");
386 EM_REG_PROFILE(&pVCpu->em.s.StatREMSync, "/PROF/CPU%d/EM/REMSync", "Profiling REM context syncing.");
387 EM_REG_PROFILE(&pVCpu->em.s.StatRAWEntry, "/PROF/CPU%d/EM/RAWEnter", "Profiling Raw Mode entry overhead.");
388 EM_REG_PROFILE(&pVCpu->em.s.StatRAWExec, "/PROF/CPU%d/EM/RAWExec", "Profiling Raw Mode execution.");
389 EM_REG_PROFILE(&pVCpu->em.s.StatRAWTail, "/PROF/CPU%d/EM/RAWTail", "Profiling Raw Mode tail overhead.");
390
391#endif /* VBOX_WITH_STATISTICS */
392
393 EM_REG_COUNTER(&pVCpu->em.s.StatForcedActions, "/PROF/CPU%d/EM/ForcedActions", "Profiling forced action execution.");
394 EM_REG_COUNTER(&pVCpu->em.s.StatHalted, "/PROF/CPU%d/EM/Halted", "Profiling halted state (VMR3WaitHalted).");
395 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatCapped, "/PROF/CPU%d/EM/Capped", "Profiling capped state (sleep).");
396 EM_REG_COUNTER(&pVCpu->em.s.StatREMTotal, "/PROF/CPU%d/EM/REMTotal", "Profiling emR3RemExecute (excluding FFs).");
397 EM_REG_COUNTER(&pVCpu->em.s.StatRAWTotal, "/PROF/CPU%d/EM/RAWTotal", "Profiling emR3RawExecute (excluding FFs).");
398
399 EM_REG_PROFILE_ADV(&pVCpu->em.s.StatTotal, "/PROF/CPU%d/EM/Total", "Profiling EMR3ExecuteVM.");
400 }
401
402 return VINF_SUCCESS;
403}
404
405
406/**
407 * Applies relocations to data and code managed by this
408 * component. This function will be called at init and
409 * whenever the VMM needs to relocate itself inside the GC.
410 *
411 * @param pVM The VM.
412 */
413VMMR3DECL(void) EMR3Relocate(PVM pVM)
414{
415 LogFlow(("EMR3Relocate\n"));
416 for (VMCPUID i = 0; i < pVM->cCpus; i++)
417 {
418 PVMCPU pVCpu = &pVM->aCpus[i];
419 if (pVCpu->em.s.pStatsR3)
420 pVCpu->em.s.pStatsRC = MMHyperR3ToRC(pVM, pVCpu->em.s.pStatsR3);
421 }
422}
423
424
425/**
426 * Reset the EM state for a CPU.
427 *
428 * Called by EMR3Reset and hot plugging.
429 *
430 * @param pVCpu The virtual CPU.
431 */
432VMMR3DECL(void) EMR3ResetCpu(PVMCPU pVCpu)
433{
434 pVCpu->em.s.fForceRAW = false;
435
436 /* VMR3Reset may return VINF_EM_RESET or VINF_EM_SUSPEND, so transition
437 out of the HALTED state here so that enmPrevState doesn't end up as
438 HALTED when EMR3ExecuteVM returns. */
439 if (pVCpu->em.s.enmState == EMSTATE_HALTED)
440 {
441 Log(("EMR3ResetCpu: Cpu#%u %s -> %s\n", pVCpu->idCpu, emR3GetStateName(pVCpu->em.s.enmState), pVCpu->idCpu == 0 ? "EMSTATE_NONE" : "EMSTATE_WAIT_SIPI"));
442 pVCpu->em.s.enmState = pVCpu->idCpu == 0 ? EMSTATE_NONE : EMSTATE_WAIT_SIPI;
443 }
444}
445
446
447/**
448 * Reset notification.
449 *
450 * @param pVM The VM handle.
451 */
452VMMR3DECL(void) EMR3Reset(PVM pVM)
453{
454 Log(("EMR3Reset: \n"));
455 for (VMCPUID i = 0; i < pVM->cCpus; i++)
456 EMR3ResetCpu(&pVM->aCpus[i]);
457}
458
459
460/**
461 * Terminates the EM.
462 *
463 * Termination means cleaning up and freeing all resources;
464 * the VM itself is at this point powered off or suspended.
465 *
466 * @returns VBox status code.
467 * @param pVM The VM to operate on.
468 */
469VMMR3DECL(int) EMR3Term(PVM pVM)
470{
471 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
472
473 PDMR3CritSectDelete(&pVM->em.s.CritSectREM);
474 return VINF_SUCCESS;
475}
476
477
478/**
479 * Execute state save operation.
480 *
481 * @returns VBox status code.
482 * @param pVM VM Handle.
483 * @param pSSM SSM operation handle.
484 */
485static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
486{
487 for (VMCPUID i = 0; i < pVM->cCpus; i++)
488 {
489 PVMCPU pVCpu = &pVM->aCpus[i];
490
491 int rc = SSMR3PutBool(pSSM, pVCpu->em.s.fForceRAW);
492 AssertRCReturn(rc, rc);
493
494 Assert(pVCpu->em.s.enmState == EMSTATE_SUSPENDED);
495 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
496 rc = SSMR3PutU32(pSSM, pVCpu->em.s.enmPrevState);
497 AssertRCReturn(rc, rc);
498
499 /* Save mwait state. */
500 rc = SSMR3PutU32(pSSM, pVCpu->em.s.mwait.fWait);
501 AssertRCReturn(rc, rc);
502 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMWaitEAX);
503 AssertRCReturn(rc, rc);
504 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMWaitECX);
505 AssertRCReturn(rc, rc);
506 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMonitorEAX);
507 AssertRCReturn(rc, rc);
508 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMonitorECX);
509 AssertRCReturn(rc, rc);
510 rc = SSMR3PutGCPtr(pSSM, pVCpu->em.s.mwait.uMonitorEDX);
511 AssertRCReturn(rc, rc);
512 }
513 return VINF_SUCCESS;
514}
515
516
517/**
518 * Execute state load operation.
519 *
520 * @returns VBox status code.
521 * @param pVM VM Handle.
522 * @param pSSM SSM operation handle.
523 * @param uVersion Data layout version.
524 * @param uPass The data pass.
525 */
526static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t uVersion, uint32_t uPass)
527{
528 /*
529 * Validate version.
530 */
531 if ( uVersion != EM_SAVED_STATE_VERSION
532 && uVersion != EM_SAVED_STATE_VERSION_PRE_MWAIT
533 && uVersion != EM_SAVED_STATE_VERSION_PRE_SMP)
534 {
535 AssertMsgFailed(("emR3Load: Invalid version uVersion=%d (current %d)!\n", uVersion, EM_SAVED_STATE_VERSION));
536 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
537 }
538 Assert(uPass == SSM_PASS_FINAL); NOREF(uPass);
539
540 /*
541 * Load the saved state.
542 */
543 for (VMCPUID i = 0; i < pVM->cCpus; i++)
544 {
545 PVMCPU pVCpu = &pVM->aCpus[i];
546
547 int rc = SSMR3GetBool(pSSM, &pVCpu->em.s.fForceRAW);
548 if (RT_FAILURE(rc))
549 pVCpu->em.s.fForceRAW = false;
550 AssertRCReturn(rc, rc);
551
552 if (uVersion > EM_SAVED_STATE_VERSION_PRE_SMP)
553 {
554 AssertCompile(sizeof(pVCpu->em.s.enmPrevState) == sizeof(uint32_t));
555 rc = SSMR3GetU32(pSSM, (uint32_t *)&pVCpu->em.s.enmPrevState);
556 AssertRCReturn(rc, rc);
557 Assert(pVCpu->em.s.enmPrevState != EMSTATE_SUSPENDED);
558
559 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
560 }
561 if (uVersion > EM_SAVED_STATE_VERSION_PRE_MWAIT)
562 {
563 /* Load mwait state. */
564 rc = SSMR3GetU32(pSSM, &pVCpu->em.s.mwait.fWait);
565 AssertRCReturn(rc, rc);
566 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMWaitEAX);
567 AssertRCReturn(rc, rc);
568 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMWaitECX);
569 AssertRCReturn(rc, rc);
570 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMonitorEAX);
571 AssertRCReturn(rc, rc);
572 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMonitorECX);
573 AssertRCReturn(rc, rc);
574 rc = SSMR3GetGCPtr(pSSM, &pVCpu->em.s.mwait.uMonitorEDX);
575 AssertRCReturn(rc, rc);
576 }
577
578 Assert(!pVCpu->em.s.pCliStatTree);
579 }
580 return VINF_SUCCESS;
581}
582
583
584/**
585 * Raise a fatal error.
586 *
587 * Safely terminates the VM with a full state report. This function
588 * will never return.
589 *
590 * @param pVCpu VMCPU handle.
591 * @param rc VBox status code.
592 */
593VMMR3DECL(void) EMR3FatalError(PVMCPU pVCpu, int rc)
594{
595 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
596 longjmp(pVCpu->em.s.u.FatalLongJump, rc);
597 AssertReleaseMsgFailed(("longjmp returned!\n"));
598}
599
600
601/**
602 * Gets the EM state name.
603 *
604 * @returns Pointer to a read-only state name.
605 * @param enmState The state.
606 */
607static const char *emR3GetStateName(EMSTATE enmState)
608{
609 switch (enmState)
610 {
611 case EMSTATE_NONE: return "EMSTATE_NONE";
612 case EMSTATE_RAW: return "EMSTATE_RAW";
613 case EMSTATE_HWACC: return "EMSTATE_HWACC";
614 case EMSTATE_REM: return "EMSTATE_REM";
615 case EMSTATE_PARAV: return "EMSTATE_PARAV";
616 case EMSTATE_HALTED: return "EMSTATE_HALTED";
617 case EMSTATE_WAIT_SIPI: return "EMSTATE_WAIT_SIPI";
618 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
619 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
620 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
621 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
622 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
623 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
624 default: return "Unknown!";
625 }
626}
627
628
629/**
630 * Debug loop.
631 *
632 * @returns VBox status code for EM.
633 * @param pVM VM handle.
634 * @param pVCpu VMCPU handle.
635 * @param rc Current EM VBox status code.
636 */
637static int emR3Debug(PVM pVM, PVMCPU pVCpu, int rc)
638{
639 for (;;)
640 {
641 Log(("emR3Debug: rc=%Rrc\n", rc));
642 const int rcLast = rc;
643
644 /*
645 * Debug related RC.
646 */
647 switch (rc)
648 {
649 /*
650 * Single step an instruction.
651 */
652 case VINF_EM_DBG_STEP:
653 if ( pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
654 || pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
655 || pVCpu->em.s.fForceRAW /* paranoia */)
656 rc = emR3RawStep(pVM, pVCpu);
657 else
658 {
659 Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
660 rc = emR3RemStep(pVM, pVCpu);
661 }
662 break;
663
664 /*
665 * Simple events: stepped, breakpoint, stop/assertion.
666 */
667 case VINF_EM_DBG_STEPPED:
668 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
669 break;
670
671 case VINF_EM_DBG_BREAKPOINT:
672 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
673 break;
674
675 case VINF_EM_DBG_STOP:
676 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
677 break;
678
679 case VINF_EM_DBG_HYPER_STEPPED:
680 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
681 break;
682
683 case VINF_EM_DBG_HYPER_BREAKPOINT:
684 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
685 break;
686
687 case VINF_EM_DBG_HYPER_ASSERTION:
688 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
689 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetRZAssertMsg1(pVM), VMMR3GetRZAssertMsg2(pVM));
690 break;
691
692 /*
693 * Guru meditation.
694 */
695 case VERR_VMM_RING0_ASSERTION: /** @todo Make a guru meditation event! */
696 rc = DBGFR3EventSrc(pVM, DBGFEVENT_FATAL_ERROR, "VERR_VMM_RING0_ASSERTION", 0, NULL, NULL);
697 break;
698 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru meditation event! */
699 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
700 break;
701
702 default: /** @todo don't use default for guru, but make special error codes! */
703 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
704 break;
705 }
706
707 /*
708 * Process the result.
709 */
710 do
711 {
712 switch (rc)
713 {
714 /*
715 * Continue the debugging loop.
716 */
717 case VINF_EM_DBG_STEP:
718 case VINF_EM_DBG_STOP:
719 case VINF_EM_DBG_STEPPED:
720 case VINF_EM_DBG_BREAKPOINT:
721 case VINF_EM_DBG_HYPER_STEPPED:
722 case VINF_EM_DBG_HYPER_BREAKPOINT:
723 case VINF_EM_DBG_HYPER_ASSERTION:
724 break;
725
726 /*
727 * Resuming execution (in some form) has to be done here if we got
728 * a hypervisor debug event.
729 */
730 case VINF_SUCCESS:
731 case VINF_EM_RESUME:
732 case VINF_EM_SUSPEND:
733 case VINF_EM_RESCHEDULE:
734 case VINF_EM_RESCHEDULE_RAW:
735 case VINF_EM_RESCHEDULE_REM:
736 case VINF_EM_HALT:
737 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
738 {
739 rc = emR3RawResumeHyper(pVM, pVCpu);
740 if (rc != VINF_SUCCESS && RT_SUCCESS(rc))
741 continue;
742 }
743 if (rc == VINF_SUCCESS)
744 rc = VINF_EM_RESCHEDULE;
745 return rc;
746
747 /*
748 * The debugger isn't attached.
749 * We'll simply turn the thing off since that's the easiest thing to do.
750 */
751 case VERR_DBGF_NOT_ATTACHED:
752 switch (rcLast)
753 {
754 case VINF_EM_DBG_HYPER_STEPPED:
755 case VINF_EM_DBG_HYPER_BREAKPOINT:
756 case VINF_EM_DBG_HYPER_ASSERTION:
757 case VERR_TRPM_PANIC:
758 case VERR_TRPM_DONT_PANIC:
759 case VERR_VMM_RING0_ASSERTION:
760 case VERR_VMM_HYPER_CR3_MISMATCH:
761 case VERR_VMM_RING3_CALL_DISABLED:
762 return rcLast;
763 }
764 return VINF_EM_OFF;
765
766 /*
767 * Status codes terminating the VM in one or another sense.
768 */
769 case VINF_EM_TERMINATE:
770 case VINF_EM_OFF:
771 case VINF_EM_RESET:
772 case VINF_EM_NO_MEMORY:
773 case VINF_EM_RAW_STALE_SELECTOR:
774 case VINF_EM_RAW_IRET_TRAP:
775 case VERR_TRPM_PANIC:
776 case VERR_TRPM_DONT_PANIC:
777 case VERR_VMM_RING0_ASSERTION:
778 case VERR_VMM_HYPER_CR3_MISMATCH:
779 case VERR_VMM_RING3_CALL_DISABLED:
780 case VERR_INTERNAL_ERROR:
781 case VERR_INTERNAL_ERROR_2:
782 case VERR_INTERNAL_ERROR_3:
783 case VERR_INTERNAL_ERROR_4:
784 case VERR_INTERNAL_ERROR_5:
785 case VERR_IPE_UNEXPECTED_STATUS:
786 case VERR_IPE_UNEXPECTED_INFO_STATUS:
787 case VERR_IPE_UNEXPECTED_ERROR_STATUS:
788 return rc;
789
790 /*
791 * The rest is unexpected, and will keep us here.
792 */
793 default:
794 AssertMsgFailed(("Unexpected rc %Rrc!\n", rc));
795 break;
796 }
797 } while (false);
798 } /* debug forever */
799}
800
801/**
802 * Steps recompiled code.
803 *
804 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
805 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
806 *
807 * @param pVM VM handle.
808 * @param pVCpu VMCPU handle.
809 */
810static int emR3RemStep(PVM pVM, PVMCPU pVCpu)
811{
812 LogFlow(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
813
814 EMRemLock(pVM);
815
816 /*
817 * Switch to REM, step instruction, switch back.
818 */
819 int rc = REMR3State(pVM, pVCpu);
820 if (RT_SUCCESS(rc))
821 {
822 rc = REMR3Step(pVM, pVCpu);
823 REMR3StateBack(pVM, pVCpu);
824 }
825 EMRemUnlock(pVM);
826
827 LogFlow(("emR3RemStep: returns %Rrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
828 return rc;
829}
830
831
832/**
833 * emR3RemExecute helper that syncs the state back from REM and leaves the REM
834 * critical section.
835 *
836 * @returns false - new fInREMState value.
837 * @param pVM The VM handle.
838 * @param pVCpu The virtual CPU handle.
839 */
840DECLINLINE(bool) emR3RemExecuteSyncBack(PVM pVM, PVMCPU pVCpu)
841{
842 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, a);
843 REMR3StateBack(pVM, pVCpu);
844 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, a);
845
846 EMRemUnlock(pVM);
847 return false;
848}
849
850
851/**
852 * Executes recompiled code.
853 *
854 * This function contains the recompiler version of the inner
855 * execution loop (the outer loop being in EMR3ExecuteVM()).
856 *
857 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
858 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
859 *
860 * @param pVM VM handle.
861 * @param pVCpu VMCPU handle.
862 * @param pfFFDone Where to store an indicator telling whether or not
863 * FFs were done before returning.
864 *
865 */
866static int emR3RemExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
867{
868#ifdef LOG_ENABLED
869 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
870 uint32_t cpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));
871
872 if (pCtx->eflags.Bits.u1VM)
873 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF));
874 else
875 Log(("EMR%d: %04X:%08X ESP=%08X IF=%d CR0=%x eflags=%x\n", cpl, pCtx->cs, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, pCtx->eflags.u));
876#endif
877 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatREMTotal, a);
878
879#if defined(VBOX_STRICT) && defined(DEBUG_bird)
880 AssertMsg( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
881 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVCpu)), /** @todo #1419 - get flat address. */
882 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVCpu), CPUMGetGuestEIP(pVCpu)));
883#endif
884
885 /*
886 * Spin until we get a forced action which returns anything but VINF_SUCCESS,
887 * or until REM suggests raw-mode execution.
888 */
889 *pfFFDone = false;
890 bool fInREMState = false;
891 int rc = VINF_SUCCESS;
892 for (;;)
893 {
894 /*
895 * Lock REM and update the state if not already in sync.
896 *
897 * Note! Big lock, but you are not supposed to own any lock when
898 * coming in here.
899 */
900 if (!fInREMState)
901 {
902 EMRemLock(pVM);
903 STAM_PROFILE_START(&pVCpu->em.s.StatREMSync, b);
904
905 /* Flush the recompiler translation blocks if the VCPU has changed,
906 and force a full CPU state resync. */
907 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
908 {
909 REMFlushTBs(pVM);
910 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
911 }
912 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
913
914 rc = REMR3State(pVM, pVCpu);
915
916 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMSync, b);
917 if (RT_FAILURE(rc))
918 break;
919 fInREMState = true;
920
921 /*
922 * We might have missed the raising of VMREQ, TIMER and some other
923 * important FFs while we were busy switching the state. So, check again.
924 */
925 if ( VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_RESET)
926 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER | VMCPU_FF_REQUEST))
927 {
928 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fGlobalForcedActions));
929 goto l_REMDoForcedActions;
930 }
931 }
932
933
934 /*
935 * Execute REM.
936 */
937 if (RT_LIKELY(EMR3IsExecutionAllowed(pVM, pVCpu)))
938 {
939 STAM_PROFILE_START(&pVCpu->em.s.StatREMExec, c);
940 rc = REMR3Run(pVM, pVCpu);
941 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMExec, c);
942 }
943 else
944 {
945 /* Give up this time slice; virtual time continues */
946 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
947 RTThreadSleep(5);
948 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
949 rc = VINF_SUCCESS;
950 }
951
952 /*
953 * Deal with high priority post execution FFs before doing anything
954 * else. Sync back the state and leave the lock to be on the safe side.
955 */
956 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
957 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
958 {
959 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
960 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
961 }
962
963 /*
964 * Process the returned status code.
965 */
966 if (rc != VINF_SUCCESS)
967 {
968 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
969 break;
970 if (rc != VINF_REM_INTERRUPED_FF)
971 {
972 /*
973 * Anything which is not known to us means an internal error
974 * and the termination of the VM!
975 */
976 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Rra\n", rc));
977 break;
978 }
979 }
980
981
982 /*
983 * Check and execute forced actions.
984 *
985 * Sync back the VM state and leave the lock before calling any of
986 * these; you never know what's going to happen here.
987 */
988#ifdef VBOX_HIGH_RES_TIMERS_HACK
989 TMTimerPollVoid(pVM, pVCpu);
990#endif
991 AssertCompile((VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)) & VMCPU_FF_TIMER);
992 if ( VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK)
993 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_BUT_RAW_MASK & ~(VMCPU_FF_CSAM_PENDING_ACTION | VMCPU_FF_CSAM_SCAN_PAGE)))
994 {
995l_REMDoForcedActions:
996 if (fInREMState)
997 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
998 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatREMTotal, a);
999 rc = emR3ForcedActions(pVM, pVCpu, rc);
1000 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatREMTotal, a);
1001 if ( rc != VINF_SUCCESS
1002 && rc != VINF_EM_RESCHEDULE_REM)
1003 {
1004 *pfFFDone = true;
1005 break;
1006 }
1007 }
1008
1009 } /* The Inner Loop, recompiled execution mode version. */
1010
1011
1012 /*
1013 * Returning. Sync back the VM state if required.
1014 */
1015 if (fInREMState)
1016 fInREMState = emR3RemExecuteSyncBack(pVM, pVCpu);
1017
1018 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatREMTotal, a);
1019 return rc;
1020}
1021
1022
1023#ifdef DEBUG
1024
1025int emR3SingleStepExecRem(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
1026{
1027 EMSTATE enmOldState = pVCpu->em.s.enmState;
1028
1029 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1030
1031 Log(("Single step BEGIN:\n"));
1032 for (uint32_t i = 0; i < cIterations; i++)
1033 {
1034 DBGFR3PrgStep(pVCpu);
1035 DBGFR3DisasInstrCurrentLog(pVCpu, "RSS: ");
1036 emR3RemStep(pVM, pVCpu);
1037 if (emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx) != EMSTATE_REM)
1038 break;
1039 }
1040 Log(("Single step END:\n"));
1041 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
1042 pVCpu->em.s.enmState = enmOldState;
1043 return VINF_EM_RESCHEDULE;
1044}
1045
1046#endif /* DEBUG */
1047
1048
1049/**
1050 * Decides whether to execute RAW, HWACC or REM.
1051 *
1052 * @returns new EM state
1053 * @param pVM The VM.
1054 * @param pVCpu The VMCPU handle.
1055 * @param pCtx The CPU context.
1056 */
1057EMSTATE emR3Reschedule(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1058{
1059 /*
1060 * When forcing raw-mode execution, things are simple.
1061 */
1062 if (pVCpu->em.s.fForceRAW)
1063 return EMSTATE_RAW;
1064
1065 /*
1066 * We stay in the wait for SIPI state unless explicitly told otherwise.
1067 */
1068 if (pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI)
1069 return EMSTATE_WAIT_SIPI;
1070
1071 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1072 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1073 /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
1074
1075 X86EFLAGS EFlags = pCtx->eflags;
1076 if (HWACCMIsEnabled(pVM))
1077 {
1078 /* Hardware accelerated raw-mode:
1079 *
1080 * Typically only 32-bit protected mode code, with paging enabled, is allowed here.
1081 */
1082 if (HWACCMR3CanExecuteGuest(pVM, pCtx))
1083 return EMSTATE_HWACC;
1084
1085 /* Note: Raw mode and hw accelerated mode are incompatible. The latter turns
1086 * off monitoring features essential for raw mode! */
1087 return EMSTATE_REM;
1088 }
1089
1090 /*
1091 * Standard raw-mode:
1092 *
1093 * Here we only support 16 and 32-bit protected mode ring-3 code with no I/O privileges,
1094 * or 32-bit protected mode ring-0 code.
1095 *
1096 * The tests are ordered by the likelihood of being true during normal execution.
1097 */
1098 if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
1099 {
1100 Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
1101 return EMSTATE_REM;
1102 }
1103
1104#ifndef VBOX_RAW_V86
1105 if (EFlags.u32 & X86_EFL_VM) {
1106 Log2(("raw mode refused: VM_MASK\n"));
1107 return EMSTATE_REM;
1108 }
1109#endif
1110
1111 /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
1112 uint32_t u32CR0 = pCtx->cr0;
1113 if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
1114 {
1115 //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
1116 return EMSTATE_REM;
1117 }
1118
1119 if (pCtx->cr4 & X86_CR4_PAE)
1120 {
1121 uint32_t u32Dummy, u32Features;
1122
1123 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
1124 if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
1125 return EMSTATE_REM;
1126 }
1127
1128 unsigned uSS = pCtx->ss;
1129 if ( pCtx->eflags.Bits.u1VM
1130 || (uSS & X86_SEL_RPL) == 3)
1131 {
1132 if (!EMIsRawRing3Enabled(pVM))
1133 return EMSTATE_REM;
1134
1135 if (!(EFlags.u32 & X86_EFL_IF))
1136 {
1137 Log2(("raw mode refused: IF (RawR3)\n"));
1138 return EMSTATE_REM;
1139 }
1140
1141 if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
1142 {
1143 Log2(("raw mode refused: CR0.WP + RawR0\n"));
1144 return EMSTATE_REM;
1145 }
1146 }
1147 else
1148 {
1149 if (!EMIsRawRing0Enabled(pVM))
1150 return EMSTATE_REM;
1151
1152 /* Only ring 0 supervisor code. */
1153 if ((uSS & X86_SEL_RPL) != 0)
1154 {
1155 Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
1156 return EMSTATE_REM;
1157 }
1158
1159 // Let's start with pure 32-bit ring-0 code first
1160 /** @todo What's pure 32-bit mode? flat? */
1161 if ( !(pCtx->ssHid.Attr.n.u1DefBig)
1162 || !(pCtx->csHid.Attr.n.u1DefBig))
1163 {
1164 Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
1165 return EMSTATE_REM;
1166 }
1167
1168 /* Write protection must be turned on, or else the guest can overwrite our hypervisor code and data. */
1169 if (!(u32CR0 & X86_CR0_WP))
1170 {
1171 Log2(("raw r0 mode refused: CR0.WP=0!\n"));
1172 return EMSTATE_REM;
1173 }
1174
1175 if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
1176 {
1177 Log2(("raw r0 mode forced: patch code\n"));
1178 return EMSTATE_RAW;
1179 }
1180
1181#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
1182 if (!(EFlags.u32 & X86_EFL_IF))
1183 {
1184 ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
1185 //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
1186 return EMSTATE_REM;
1187 }
1188#endif
1189
1190 /** @todo still necessary??? */
1191 if (EFlags.Bits.u2IOPL != 0)
1192 {
1193 Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
1194 return EMSTATE_REM;
1195 }
1196 }
1197
1198 Assert(PGMPhysIsA20Enabled(pVCpu));
1199 return EMSTATE_RAW;
1200}
1201
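#if 0 /* Editor's illustration (not part of the original source): a minimal,
       * hypothetical sketch of the decision order emR3Reschedule implements
       * above. All names in this block are invented for illustration only. */
typedef enum { SCHED_RAW, SCHED_HWACC, SCHED_REM } SCHEDSTATE;

static SCHEDSTATE schedSketch(bool fForceRaw, bool fHwAccEnabled,
                              bool fHwAccCanRun, bool fRawChecksPass)
{
    if (fForceRaw)                          /* Forced raw-mode wins outright.       */
        return SCHED_RAW;
    if (fHwAccEnabled)                      /* HWACC and raw-mode are incompatible, */
        return fHwAccCanRun ? SCHED_HWACC   /* so it's either HWACC or the          */
                            : SCHED_REM;    /* recompiler.                          */
    return fRawChecksPass ? SCHED_RAW       /* Raw-mode only if all the eflags/CR0/ */
                          : SCHED_REM;      /* CR4/selector checks pass.            */
}
#endif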
1202
1203/**
1204 * Executes all high priority post execution force actions.
1205 *
1206 * @returns rc or a fatal status code.
1207 *
1208 * @param pVM VM handle.
1209 * @param pVCpu VMCPU handle.
1210 * @param rc The current rc.
1211 */
1212int emR3HighPriorityPostForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1213{
1214 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PDM_CRITSECT))
1215 PDMCritSectFF(pVCpu);
1216
1217 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_PENDING_ACTION))
1218 CSAMR3DoPendingAction(pVM, pVCpu);
1219
1220 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1221 {
1222 if ( rc > VINF_EM_NO_MEMORY
1223 && rc <= VINF_EM_LAST)
1224 rc = VINF_EM_NO_MEMORY;
1225 }
1226
1227 return rc;
1228}
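/* Editor's note (derived from the code above): the VM_FF_PGM_NO_MEMORY clause
 * downgrades any informational EM status in the (VINF_EM_NO_MEMORY, VINF_EM_LAST]
 * range to VINF_EM_NO_MEMORY, so the out-of-memory condition outranks less
 * important reschedule-style codes, while failures (rc < VINF_SUCCESS) and more
 * important codes pass through unchanged. */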
1229
1230
1231/**
1232 * Executes all pending forced actions.
1233 *
1234 * Forced actions can cause execution delays and execution
1235 * rescheduling. The first we deal with using action priority, so
1236 * that for instance pending timers aren't scheduled and ran until
1237 * right before execution. The rescheduling we deal with using
1238 * return codes. The same goes for VM termination, only in that case
1239 * we exit everything.
1240 *
1241 * @returns VBox status code of equal or greater importance/severity than rc.
1242 * The most important ones are: VINF_EM_RESCHEDULE,
1243 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1244 *
1245 * @param pVM VM handle.
1246 * @param pVCpu VMCPU handle.
1247 * @param rc The current rc.
1248 *
1249 */
1250int emR3ForcedActions(PVM pVM, PVMCPU pVCpu, int rc)
1251{
1252 STAM_REL_PROFILE_START(&pVCpu->em.s.StatForcedActions, a);
1253#ifdef VBOX_STRICT
1254 int rcIrq = VINF_SUCCESS;
1255#endif
1256 int rc2;
1257#define UPDATE_RC() \
1258 do { \
1259 AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Rra\n", rc2)); \
1260 if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
1261 break; \
1262 if (!rc || rc2 < rc) \
1263 rc = rc2; \
1264 } while (0)
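/* Editor's illustration (derived from the macro above; values hypothetical):
 * UPDATE_RC() merges rc2 into rc so that the numerically smaller - i.e. more
 * important - informational status wins, while a VINF_SUCCESS rc2 or an
 * already failed rc (rc < VINF_SUCCESS) leaves rc untouched:
 *
 *     rc = VINF_SUCCESS;
 *     rc2 = VINF_EM_RESCHEDULE;            UPDATE_RC();  // rc == VINF_EM_RESCHEDULE
 *     rc2 = VINF_SUCCESS;                  UPDATE_RC();  // rc unchanged
 *     rc2 = (some smaller VINF_EM_* code); UPDATE_RC();  // rc takes rc2
 */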
1265
1266 /*
1267 * Post execution chunk first.
1268 */
1269 if ( VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK)
1270 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_POST_MASK))
1271 {
1272 /*
1273 * EMT Rendezvous (must be serviced before termination).
1274 */
1275 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1276 {
1277 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1278 UPDATE_RC();
1279 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1280 * stopped/reset before the next VM state change is made. We need a better
1281 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1282 * && rc >= VINF_EM_SUSPEND). */
1283 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1284 {
1285 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1286 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1287 return rc;
1288 }
1289 }
1290
1291 /*
1292 * State change request (cleared by vmR3SetStateLocked).
1293 */
1294 if (VM_FF_ISPENDING(pVM, VM_FF_CHECK_VM_STATE))
1295 {
1296 VMSTATE enmState = VMR3GetState(pVM);
1297 switch (enmState)
1298 {
1299 case VMSTATE_FATAL_ERROR:
1300 case VMSTATE_FATAL_ERROR_LS:
1301 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1302 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1303 return VINF_EM_SUSPEND;
1304
1305 case VMSTATE_DESTROYING:
1306 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1307 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1308 return VINF_EM_TERMINATE;
1309
1310 default:
1311 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1312 }
1313 }
1314
1315 /*
1316 * Debugger Facility polling.
1317 */
1318 if (VM_FF_ISPENDING(pVM, VM_FF_DBGF))
1319 {
1320 rc2 = DBGFR3VMMForcedAction(pVM);
1321 UPDATE_RC();
1322 }
1323
1324 /*
1325 * Postponed reset request.
1326 */
1327 if (VM_FF_TESTANDCLEAR(pVM, VM_FF_RESET))
1328 {
1329 rc2 = VMR3Reset(pVM);
1330 UPDATE_RC();
1331 }
1332
1333 /*
1334 * CSAM page scanning.
1335 */
1336 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1337 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE))
1338 {
1339 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1340
1341 /** @todo Check for 16 or 32 bit code! (D bit in the code selector.) */
1342 Log(("Forced action VMCPU_FF_CSAM_SCAN_PAGE\n"));
1343
1344 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
1345 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_CSAM_SCAN_PAGE);
1346 }
1347
1348 /*
1349 * Out of memory? Putting this after CSAM as it may in theory cause us to run out of memory.
1350 */
1351 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1352 {
1353 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1354 UPDATE_RC();
1355 if (rc == VINF_EM_NO_MEMORY)
1356 return rc;
1357 }
1358
1359 /* check that we got them all */
1360 AssertCompile(VM_FF_NORMAL_PRIORITY_POST_MASK == (VM_FF_CHECK_VM_STATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1361 AssertCompile(VMCPU_FF_NORMAL_PRIORITY_POST_MASK == VMCPU_FF_CSAM_SCAN_PAGE);
1362 }
1363
1364 /*
1365 * Normal priority then.
1366 * (Executed in no particular order.)
1367 */
1368 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_NORMAL_PRIORITY_MASK, VM_FF_PGM_NO_MEMORY))
1369 {
1370 /*
1371 * PDM Queues are pending.
1372 */
1373 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_QUEUES, VM_FF_PGM_NO_MEMORY))
1374 PDMR3QueueFlushAll(pVM);
1375
1376 /*
1377 * PDM DMA transfers are pending.
1378 */
1379 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PDM_DMA, VM_FF_PGM_NO_MEMORY))
1380 PDMR3DmaRun(pVM);
1381
1382 /*
1383 * EMT Rendezvous (make sure they are handled before the requests).
1384 */
1385 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1386 {
1387 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1388 UPDATE_RC();
1389 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1390 * stopped/reset before the next VM state change is made. We need a better
1391 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1392 * && rc >= VINF_EM_SUSPEND). */
1393 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1394 {
1395 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1396 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1397 return rc;
1398 }
1399 }
1400
1401 /*
1402 * Requests from other threads.
1403 */
1404 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REQUEST, VM_FF_PGM_NO_MEMORY))
1405 {
1406 rc2 = VMR3ReqProcessU(pVM->pUVM, VMCPUID_ANY);
1407 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE) /** @todo this shouldn't be necessary */
1408 {
1409 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1410 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1411 return rc2;
1412 }
1413 UPDATE_RC();
1414 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1415 * stopped/reset before the next VM state change is made. We need a better
1416 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1417 * && rc >= VINF_EM_SUSPEND). */
1418 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1419 {
1420 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1421 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1422 return rc;
1423 }
1424 }
1425
1426 /* Replay the handler notification changes. */
1427 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_REM_HANDLER_NOTIFY, VM_FF_PGM_NO_MEMORY))
1428 {
1429 /* Try not to cause deadlocks. */
1430 if ( pVM->cCpus == 1
1431 || ( !PGMIsLockOwner(pVM)
1432 && !IOMIsLockOwner(pVM))
1433 )
1434 {
1435 EMRemLock(pVM);
1436 REMR3ReplayHandlerNotifications(pVM);
1437 EMRemUnlock(pVM);
1438 }
1439 }
1440
1441 /* check that we got them all */
1442 AssertCompile(VM_FF_NORMAL_PRIORITY_MASK == (VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA | VM_FF_REM_HANDLER_NOTIFY | VM_FF_EMT_RENDEZVOUS));
1443 }
1444
1445 /*
1446 * Normal priority then. (per-VCPU)
1447 * (Executed in no particular order.)
1448 */
1449 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1450 && VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_NORMAL_PRIORITY_MASK))
1451 {
1452 /*
1453 * Requests from other threads.
1454 */
1455 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
1456 {
1457 rc2 = VMR3ReqProcessU(pVM->pUVM, pVCpu->idCpu);
1458 if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE || rc2 == VINF_EM_RESET)
1459 {
1460 Log2(("emR3ForcedActions: returns %Rrc\n", rc2));
1461 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1462 return rc2;
1463 }
1464 UPDATE_RC();
1465 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1466 * stopped/reset before the next VM state change is made. We need a better
1467 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1468 * && rc >= VINF_EM_SUSPEND). */
1469 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1470 {
1471 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1472 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1473 return rc;
1474 }
1475 }
1476
1477 /* check that we got them all */
1478 Assert(!(VMCPU_FF_NORMAL_PRIORITY_MASK & ~(VMCPU_FF_REQUEST)));
1479 }
1480
1481 /*
1482 * High priority pre execution chunk last.
1483 * (Executed in ascending priority order.)
1484 */
1485 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK)
1486 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_MASK))
1487 {
1488 /*
1489 * Timers before interrupts.
1490 */
1491 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TIMER)
1492 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1493 TMR3TimerQueuesDo(pVM);
1494
1495 /*
1496 * The instruction following an emulated STI should *always* be executed!
1497 */
1498 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1499 && !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1500 {
1501 Log(("VM_FF_EMULATED_STI at %RGv successor %RGv\n", (RTGCPTR)CPUMGetGuestRIP(pVCpu), EMGetInhibitInterruptsPC(pVCpu)));
1502 if (CPUMGetGuestEIP(pVCpu) != EMGetInhibitInterruptsPC(pVCpu))
1503 {
1504 /* Note: we intentionally don't clear VMCPU_FF_INHIBIT_INTERRUPTS here if the eip is the same as the inhibited instr address.
1505 * Before we are able to execute this instruction in raw mode (iret to guest code), an external interrupt might
1506 * force a world switch again, possibly allowing a guest interrupt to be dispatched in the process. This could
1507 * break the guest. Sounds very unlikely, but such timing-sensitive problems are not as rare as you might think.
1508 */
1509 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
1510 }
1511 if (HWACCMR3IsActive(pVCpu))
1512 rc2 = VINF_EM_RESCHEDULE_HWACC;
1513 else
1514 rc2 = PATMAreInterruptsEnabled(pVM) ? VINF_EM_RESCHEDULE_RAW : VINF_EM_RESCHEDULE_REM;
1515
1516 UPDATE_RC();
1517 }
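/* Editor's note (derived from the code above): the STI shadow thus stays in
 * force only while RIP still equals the address recorded by the emulated
 * STI/MOV SS; once we are past that instruction the inhibit flag is lifted,
 * and either way we reschedule so the shadowed instruction is executed before
 * the interrupt injection block below can run. */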
1518
1519 /*
1520 * Interrupts.
1521 */
1522 if ( !VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)
1523 && !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS)
1524 && (!rc || rc >= VINF_EM_RESCHEDULE_HWACC)
1525 && !TRPMHasTrap(pVCpu) /* an interrupt could already be scheduled for dispatching in the recompiler. */
1526 && PATMAreInterruptsEnabled(pVM)
1527 && !HWACCMR3IsEventPending(pVCpu))
1528 {
1529 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1530 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
1531 {
1532 /* Note: it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
1533 /** @todo this really isn't nice, should properly handle this */
1534 rc2 = TRPMR3InjectEvent(pVM, pVCpu, TRPM_HARDWARE_INT);
1535#ifdef VBOX_STRICT
1536 rcIrq = rc2;
1537#endif
1538 UPDATE_RC();
1539 }
1540 /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
1541 else if (REMR3QueryPendingInterrupt(pVM, pVCpu) != REM_NO_PENDING_IRQ)
1542 {
1543 rc2 = VINF_EM_RESCHEDULE_REM;
1544 UPDATE_RC();
1545 }
1546 }
1547
1548 /*
1549 * Allocate handy pages.
1550 */
1551 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1552 {
1553 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1554 UPDATE_RC();
1555 }
1556
1557 /*
1558 * Debugger Facility request.
1559 */
1560 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_DBGF, VM_FF_PGM_NO_MEMORY))
1561 {
1562 rc2 = DBGFR3VMMForcedAction(pVM);
1563 UPDATE_RC();
1564 }
1565
1566 /*
1567 * EMT Rendezvous (must be serviced before termination).
1568 */
1569 if (VM_FF_ISPENDING(pVM, VM_FF_EMT_RENDEZVOUS))
1570 {
1571 rc2 = VMMR3EmtRendezvousFF(pVM, pVCpu);
1572 UPDATE_RC();
1573 /** @todo HACK ALERT! The following test is to make sure EM+TM thinks the VM is
1574 * stopped/reset before the next VM state change is made. We need a better
1575 * solution for this, or at least make it possible to do: (rc >= VINF_EM_FIRST
1576 * && rc >= VINF_EM_SUSPEND). */
1577 if (RT_UNLIKELY(rc == VINF_EM_SUSPEND || rc == VINF_EM_RESET || rc == VINF_EM_OFF))
1578 {
1579 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1580 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1581 return rc;
1582 }
1583 }
1584
1585 /*
1586 * State change request (cleared by vmR3SetStateLocked).
1587 */
1588 if (VM_FF_ISPENDING(pVM, VM_FF_CHECK_VM_STATE))
1589 {
1590 VMSTATE enmState = VMR3GetState(pVM);
1591 switch (enmState)
1592 {
1593 case VMSTATE_FATAL_ERROR:
1594 case VMSTATE_FATAL_ERROR_LS:
1595 Log2(("emR3ForcedActions: %s -> VINF_EM_SUSPEND\n", VMGetStateName(enmState) ));
1596 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1597 return VINF_EM_SUSPEND;
1598
1599 case VMSTATE_DESTROYING:
1600 Log2(("emR3ForcedActions: %s -> VINF_EM_TERMINATE\n", VMGetStateName(enmState) ));
1601 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1602 return VINF_EM_TERMINATE;
1603
1604 default:
1605 AssertMsgFailed(("%s\n", VMGetStateName(enmState)));
1606 }
1607 }
1608
1609 /*
1610 * Out of memory? Since most of our fellow high priority actions may cause us
1611 * to run out of memory, we're employing VM_FF_IS_PENDING_EXCEPT and putting this
1612 * at the end rather than the start. Also, VM_FF_TERMINATE has higher priority
1613 * than this action, since the VM can terminate without allocating more memory.
1614 */
1615 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1616 {
1617 rc2 = PGMR3PhysAllocateHandyPages(pVM);
1618 UPDATE_RC();
1619 if (rc == VINF_EM_NO_MEMORY)
1620 return rc;
1621 }
1622
1623 /*
1624 * If the virtual sync clock is still stopped, make TM restart it.
1625 */
1626 if (VM_FF_ISPENDING(pVM, VM_FF_TM_VIRTUAL_SYNC))
1627 TMR3VirtualSyncFF(pVM, pVCpu);
1628
1629#ifdef DEBUG
1630 /*
1631 * Debug, pause the VM.
1632 */
1633 if (VM_FF_ISPENDING(pVM, VM_FF_DEBUG_SUSPEND))
1634 {
1635 VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
1636 Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
1637 return VINF_EM_SUSPEND;
1638 }
1639#endif
1640
1641 /* check that we got them all */
1642 AssertCompile(VM_FF_HIGH_PRIORITY_PRE_MASK == (VM_FF_TM_VIRTUAL_SYNC | VM_FF_DBGF | VM_FF_CHECK_VM_STATE | VM_FF_DEBUG_SUSPEND | VM_FF_PGM_NEED_HANDY_PAGES | VM_FF_PGM_NO_MEMORY | VM_FF_EMT_RENDEZVOUS));
1643 AssertCompile(VMCPU_FF_HIGH_PRIORITY_PRE_MASK == (VMCPU_FF_TIMER | VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_TRPM_SYNC_IDT | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT | VMCPU_FF_INHIBIT_INTERRUPTS));
1644 }
1645
1646#undef UPDATE_RC
1647 Log2(("emR3ForcedActions: returns %Rrc\n", rc));
1648 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatForcedActions, a);
1649 Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
1650 return rc;
1651}
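/* Editor's summary of the forced-action processing order implemented above
 * (informational, derived from the code; not a separate API):
 *   1. Normal-priority post FFs:   EMT rendezvous, VM state change, DBGF,
 *                                  postponed reset, CSAM page scan, handy-page
 *                                  allocation on VM_FF_PGM_NO_MEMORY.
 *   2. Normal-priority VM FFs:     PDM queues, PDM DMA, EMT rendezvous,
 *                                  cross-thread requests, REM handler
 *                                  notifications.
 *   3. Normal-priority VCPU FFs:   per-VCPU requests.
 *   4. High-priority pre FFs:      timers, STI shadow, interrupt injection,
 *                                  handy pages, DBGF, EMT rendezvous, VM state
 *                                  change, out-of-memory, virtual-sync clock
 *                                  (and, in DEBUG builds, debug suspend).
 */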
1652
1653
1654/**
1655 * Checks whether the preset execution time cap allows guest execution to be scheduled.
1656 *
1657 * @returns true if execution is allowed, false otherwise.
1658 * @param pVM The VM to operate on.
1659 * @param pVCpu The VMCPU to operate on.
1660 *
1661 */
1662VMMR3DECL(bool) EMR3IsExecutionAllowed(PVM pVM, PVMCPU pVCpu)
1663{
1664 uint64_t u64UserTime, u64KernelTime;
1665
1666 if ( pVM->uCpuExecutionCap != 100
1667 && RT_SUCCESS(RTThreadGetExecutionTimeMilli(&u64KernelTime, &u64UserTime)))
1668 {
1669 uint64_t u64TimeNow = RTTimeMilliTS();
1670 if (pVCpu->em.s.u64TimeSliceStart + EM_TIME_SLICE < u64TimeNow)
1671 {
1672 /* New time slice. */
1673 pVCpu->em.s.u64TimeSliceStart = u64TimeNow;
1674 pVCpu->em.s.u64TimeSliceStartExec = u64KernelTime + u64UserTime;
1675 pVCpu->em.s.u64TimeSliceExec = 0;
1676 }
1677 pVCpu->em.s.u64TimeSliceExec = u64KernelTime + u64UserTime - pVCpu->em.s.u64TimeSliceStartExec;
1678
1679 Log2(("EMR3IsExecutionAllowed: start=%RX64 startexec=%RX64 exec=%RX64 (cap=%x)\n", pVCpu->em.s.u64TimeSliceStart, pVCpu->em.s.u64TimeSliceStartExec, pVCpu->em.s.u64TimeSliceExec, (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100));
1680 if (pVCpu->em.s.u64TimeSliceExec >= (EM_TIME_SLICE * pVM->uCpuExecutionCap) / 100)
1681 return false;
1682 }
1683 return true;
1684}
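/* Editor's worked example (hypothetical numbers; EM_TIME_SLICE comes from
 * EMInternal.h and is assumed to be 100 ms here purely for illustration):
 * with uCpuExecutionCap = 50, a VCPU may consume at most
 * 100 ms * 50 / 100 = 50 ms of combined kernel+user thread time per slice;
 * once u64TimeSliceExec reaches that budget, EMR3IsExecutionAllowed returns
 * false until a new time slice begins. */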
1685
1686
1687/**
1688 * Execute VM.
1689 *
1690 * This function is the main loop of the VM. The emulation thread
1691 * calls this function when the VM has been successfully constructed
1692 * and we're ready to execute the VM.
1693 *
1694 * Returning from this function means that the VM is turned off or
1695 * suspended (state already saved) and deconstruction is next in line.
1696 *
1697 * All interaction from other threads is done using forced actions
1698 * and signaling of the wait object.
1699 *
1700 * @returns VBox status code; informational status codes may indicate failure.
1701 * @param pVM The VM to operate on.
1702 * @param pVCpu The VMCPU to operate on.
1703 */
1704VMMR3DECL(int) EMR3ExecuteVM(PVM pVM, PVMCPU pVCpu)
1705{
1706 Log(("EMR3ExecuteVM: pVM=%p enmVMState=%d (%s) enmState=%d (%s) enmPrevState=%d (%s) fForceRAW=%RTbool\n",
1707 pVM,
1708 pVM->enmVMState, VMR3GetStateName(pVM->enmVMState),
1709 pVCpu->em.s.enmState, emR3GetStateName(pVCpu->em.s.enmState),
1710 pVCpu->em.s.enmPrevState, emR3GetStateName(pVCpu->em.s.enmPrevState),
1711 pVCpu->em.s.fForceRAW));
1712 VM_ASSERT_EMT(pVM);
1713 AssertMsg( pVCpu->em.s.enmState == EMSTATE_NONE
1714 || pVCpu->em.s.enmState == EMSTATE_WAIT_SIPI
1715 || pVCpu->em.s.enmState == EMSTATE_SUSPENDED,
1716 ("%s\n", emR3GetStateName(pVCpu->em.s.enmState)));
1717
1718 int rc = setjmp(pVCpu->em.s.u.FatalLongJump);
1719 if (rc == 0)
1720 {
1721 /*
1722 * Start the virtual time.
1723 */
1724 TMR3NotifyResume(pVM, pVCpu);
1725
1726 /*
1727 * The Outer Main Loop.
1728 */
1729 bool fFFDone = false;
1730
1731 /* Reschedule right away to start in the right state. */
1732 rc = VINF_SUCCESS;
1733
1734 /* If resuming after a pause or a state load, restore the previous
1735 state, or else we'd start executing code right away. Otherwise, just reschedule. */
1736 if ( pVCpu->em.s.enmState == EMSTATE_SUSPENDED
1737 && ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
1738 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED))
1739 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
1740 else
1741 pVCpu->em.s.enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1742
1743 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
1744 for (;;)
1745 {
1746 /*
1747 * Before we can schedule anything (we're here because
1748 * scheduling is required) we must service any pending
1749 * forced actions to avoid any pending action causing
1750 * immediate rescheduling upon entering an inner loop.
1751 *
1752 * Do forced actions.
1753 */
1754 if ( !fFFDone
1755 && rc != VINF_EM_TERMINATE
1756 && rc != VINF_EM_OFF
1757 && ( VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK)
1758 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_ALL_BUT_RAW_MASK)))
1759 {
1760 rc = emR3ForcedActions(pVM, pVCpu, rc);
1761 if ( ( rc == VINF_EM_RESCHEDULE_REM
1762 || rc == VINF_EM_RESCHEDULE_HWACC)
1763 && pVCpu->em.s.fForceRAW)
1764 rc = VINF_EM_RESCHEDULE_RAW;
1765 }
1766 else if (fFFDone)
1767 fFFDone = false;
1768
1769 /*
1770 * Now what to do?
1771 */
1772 Log2(("EMR3ExecuteVM: rc=%Rrc\n", rc));
1773 switch (rc)
1774 {
1775 /*
1776 * Keep doing what we're currently doing.
1777 */
1778 case VINF_SUCCESS:
1779 break;
1780
1781 /*
1782 * Reschedule - to raw-mode execution.
1783 */
1784 case VINF_EM_RESCHEDULE_RAW:
1785 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", pVCpu->em.s.enmState, EMSTATE_RAW));
1786 pVCpu->em.s.enmState = EMSTATE_RAW;
1787 break;
1788
1789 /*
1790 * Reschedule - to hardware accelerated raw-mode execution.
1791 */
1792 case VINF_EM_RESCHEDULE_HWACC:
1793 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HWACC: %d -> %d (EMSTATE_HWACC)\n", pVCpu->em.s.enmState, EMSTATE_HWACC));
1794 Assert(!pVCpu->em.s.fForceRAW);
1795 pVCpu->em.s.enmState = EMSTATE_HWACC;
1796 break;
1797
1798 /*
1799 * Reschedule - to recompiled execution.
1800 */
1801 case VINF_EM_RESCHEDULE_REM:
1802 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", pVCpu->em.s.enmState, EMSTATE_REM));
1803 pVCpu->em.s.enmState = EMSTATE_REM;
1804 break;
1805
1806 /*
1807 * Resume.
1808 */
1809 case VINF_EM_RESUME:
1810 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", pVCpu->em.s.enmState));
1811 /* Don't reschedule in the halted or wait for SIPI case. */
1812 if ( pVCpu->em.s.enmPrevState == EMSTATE_WAIT_SIPI
1813 || pVCpu->em.s.enmPrevState == EMSTATE_HALTED)
1814 break;
1815 /* fall through and get scheduled. */
1816
1817 /*
1818 * Reschedule.
1819 */
1820 case VINF_EM_RESCHEDULE:
1821 {
1822 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1823 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", pVCpu->em.s.enmState, enmState, emR3GetStateName(enmState)));
1824 pVCpu->em.s.enmState = enmState;
1825 break;
1826 }
1827
1828 /*
1829 * Halted.
1830 */
1831 case VINF_EM_HALT:
1832 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_HALTED));
1833 pVCpu->em.s.enmState = EMSTATE_HALTED;
1834 break;
1835
1836 /*
1837 * Switch to the wait for SIPI state (application processor only)
1838 */
1839 case VINF_EM_WAIT_SIPI:
1840 Assert(pVCpu->idCpu != 0);
1841 Log2(("EMR3ExecuteVM: VINF_EM_WAIT_SIPI: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_WAIT_SIPI));
1842 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
1843 break;
1844
1845
1846 /*
1847 * Suspend.
1848 */
1849 case VINF_EM_SUSPEND:
1850 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_SUSPENDED));
1851 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1852 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
1853 break;
1854
1855 /*
1856 * Reset.
1857 * We might end up doing a double reset for now; we'll have to clean up the mess later.
1858 */
1859 case VINF_EM_RESET:
1860 {
1861 if (pVCpu->idCpu == 0)
1862 {
1863 EMSTATE enmState = emR3Reschedule(pVM, pVCpu, pVCpu->em.s.pCtx);
1864 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d (%s)\n", pVCpu->em.s.enmState, enmState, emR3GetStateName(enmState)));
1865 pVCpu->em.s.enmState = enmState;
1866 }
1867 else
1868 {
1869 /* All other VCPUs go into the wait for SIPI state. */
1870 pVCpu->em.s.enmState = EMSTATE_WAIT_SIPI;
1871 }
1872 break;
1873 }
1874
1875 /*
1876 * Power Off.
1877 */
1878 case VINF_EM_OFF:
1879 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", pVCpu->em.s.enmState, EMSTATE_TERMINATING));
1880 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
1881 TMR3NotifySuspend(pVM, pVCpu);
1882 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
1883 return rc;
1884
1885 /*
1886 * Terminate the VM.
1887 */
1888 case VINF_EM_TERMINATE:
1889 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", pVCpu->em.s.enmState, EMSTATE_TERMINATING));
1890 pVCpu->em.s.enmState = EMSTATE_TERMINATING;
1891 if (pVM->enmVMState < VMSTATE_DESTROYING) /* ugly */
1892 TMR3NotifySuspend(pVM, pVCpu);
1893 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
1894 return rc;
1895
1896
1897 /*
1898 * Out of memory, suspend the VM and stuff.
1899 */
1900 case VINF_EM_NO_MEMORY:
1901 Log2(("EMR3ExecuteVM: VINF_EM_NO_MEMORY: %d -> %d\n", pVCpu->em.s.enmState, EMSTATE_SUSPENDED));
1902 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
1903 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
1904 TMR3NotifySuspend(pVM, pVCpu);
1905 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
1906
1907 rc = VMSetRuntimeError(pVM, VMSETRTERR_FLAGS_SUSPEND, "HostMemoryLow",
1908 N_("Unable to allocate and lock memory. The virtual machine will be paused. Please close applications to free up memory or close the VM"));
1909 if (rc != VINF_EM_SUSPEND)
1910 {
1911 if (RT_SUCCESS_NP(rc))
1912 {
1913 AssertLogRelMsgFailed(("%Rrc\n", rc));
1914 rc = VERR_EM_INTERNAL_ERROR;
1915 }
1916 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
1917 }
1918 return rc;
1919
1920 /*
1921 * Guest debug events.
1922 */
1923 case VINF_EM_DBG_STEPPED:
1924 AssertMsgFailed(("VINF_EM_DBG_STEPPED cannot be here!"));
1925 case VINF_EM_DBG_STOP:
1926 case VINF_EM_DBG_BREAKPOINT:
1927 case VINF_EM_DBG_STEP:
1928 if (pVCpu->em.s.enmState == EMSTATE_RAW)
1929 {
1930 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_GUEST_RAW));
1931 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
1932 }
1933 else
1934 {
1935 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_GUEST_REM));
1936 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1937 }
1938 break;
1939
1940 /*
1941 * Hypervisor debug events.
1942 */
1943 case VINF_EM_DBG_HYPER_STEPPED:
1944 case VINF_EM_DBG_HYPER_BREAKPOINT:
1945 case VINF_EM_DBG_HYPER_ASSERTION:
1946 Log2(("EMR3ExecuteVM: %Rrc: %d -> %d\n", rc, pVCpu->em.s.enmState, EMSTATE_DEBUG_HYPER));
1947 pVCpu->em.s.enmState = EMSTATE_DEBUG_HYPER;
1948 break;
1949
1950 /*
1951 * Guru meditations.
1952 */
1953 case VERR_VMM_RING0_ASSERTION:
1954 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, pVCpu->em.s.enmState, EMSTATE_GURU_MEDITATION));
1955 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
1956 break;
1957
1958 /*
1959 * Any error code showing up here other than the ones we
1960 * know and process above are considered to be FATAL.
1961 *
1962 * Unknown warnings and informational status codes are also
1963 * included in this.
1964 */
1965 default:
1966 if (RT_SUCCESS_NP(rc))
1967 {
1968 AssertMsgFailed(("Unexpected warning or informational status code %Rra!\n", rc));
1969 rc = VERR_EM_INTERNAL_ERROR;
1970 }
1971 Log(("EMR3ExecuteVM: %Rrc: %d -> %d (EMSTATE_GURU_MEDITATION)\n", rc, pVCpu->em.s.enmState, EMSTATE_GURU_MEDITATION));
1972 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
1973 break;
1974 }
1975
1976 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x); /* (skip this in release) */
1977 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
1978
1979 /*
1980 * Act on the state.
1981 */
1982 switch (pVCpu->em.s.enmState)
1983 {
1984 /*
1985 * Execute raw.
1986 */
1987 case EMSTATE_RAW:
1988 rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
1989 break;
1990
1991 /*
1992 * Execute hardware accelerated raw.
1993 */
1994 case EMSTATE_HWACC:
1995 rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone);
1996 break;
1997
1998 /*
1999 * Execute recompiled.
2000 */
2001 case EMSTATE_REM:
2002 rc = emR3RemExecute(pVM, pVCpu, &fFFDone);
2003 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Rrc\n", rc));
2004 break;
2005
2006 /*
2007 * Application processor execution halted until SIPI.
2008 */
2009 case EMSTATE_WAIT_SIPI:
2010 /* no break */
2011 /*
2012 * hlt - execution halted until interrupt.
2013 */
2014 case EMSTATE_HALTED:
2015 {
2016 STAM_REL_PROFILE_START(&pVCpu->em.s.StatHalted, y);
2017 if (pVCpu->em.s.mwait.fWait & EMMWAIT_FLAG_ACTIVE)
2018 {
2019 /* mwait has a special extension where it's woken up when an interrupt is pending even when IF=0. */
2020 rc = VMR3WaitHalted(pVM, pVCpu, !(pVCpu->em.s.mwait.fWait & EMMWAIT_FLAG_BREAKIRQIF0) && !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2021 pVCpu->em.s.mwait.fWait &= ~(EMMWAIT_FLAG_ACTIVE | EMMWAIT_FLAG_BREAKIRQIF0);
2022 }
2023 else
2024 rc = VMR3WaitHalted(pVM, pVCpu, !(CPUMGetGuestEFlags(pVCpu) & X86_EFL_IF));
2025
2026 STAM_REL_PROFILE_STOP(&pVCpu->em.s.StatHalted, y);
2027 break;
2028 }
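/* Editor's worked truth table for the halt/mwait wake-up condition above
 * (the third VMR3WaitHalted parameter is treated as "ignore pending
 * interrupts", matching the plain hlt path below the mwait case):
 *   BREAKIRQIF0 set,   IF=0  ->  false          -> interrupts wake the VCPU
 *   BREAKIRQIF0 clear, IF=0  ->  true  && true  -> interrupts are ignored
 *   BREAKIRQIF0 clear, IF=1  ->  true  && false -> interrupts wake the VCPU
 */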
2029
2030 /*
2031 * Suspended - return to VM.cpp.
2032 */
2033 case EMSTATE_SUSPENDED:
2034 TMR3NotifySuspend(pVM, pVCpu);
2035 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2036 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2037 return VINF_EM_SUSPEND;
2038
2039 /*
2040 * Debugging in the guest.
2041 */
2042 case EMSTATE_DEBUG_GUEST_REM:
2043 case EMSTATE_DEBUG_GUEST_RAW:
2044 TMR3NotifySuspend(pVM, pVCpu);
2045 rc = emR3Debug(pVM, pVCpu, rc);
2046 TMR3NotifyResume(pVM, pVCpu);
2047 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2048 break;
2049
2050 /*
2051 * Debugging in the hypervisor.
2052 */
2053 case EMSTATE_DEBUG_HYPER:
2054 {
2055 TMR3NotifySuspend(pVM, pVCpu);
2056 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2057
2058 rc = emR3Debug(pVM, pVCpu, rc);
2059 Log2(("EMR3ExecuteVM: emR3Debug -> %Rrc (state %d)\n", rc, pVCpu->em.s.enmState));
2060 if (rc != VINF_SUCCESS)
2061 {
2062 /* switch to guru meditation mode */
2063 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2064 VMMR3FatalDump(pVM, pVCpu, rc);
2065 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2066 return rc;
2067 }
2068
2069 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatTotal, x);
2070 TMR3NotifyResume(pVM, pVCpu);
2071 break;
2072 }
2073
2074 /*
2075 * Guru meditation takes place in the debugger.
2076 */
2077 case EMSTATE_GURU_MEDITATION:
2078 {
2079 TMR3NotifySuspend(pVM, pVCpu);
2080 VMMR3FatalDump(pVM, pVCpu, rc);
2081 emR3Debug(pVM, pVCpu, rc);
2082 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2083 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2084 return rc;
2085 }
2086
2087 /*
2088 * The states we don't expect here.
2089 */
2090 case EMSTATE_NONE:
2091 case EMSTATE_TERMINATING:
2092 default:
2093 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVCpu->em.s.enmState));
2094 pVCpu->em.s.enmState = EMSTATE_GURU_MEDITATION;
2095 TMR3NotifySuspend(pVM, pVCpu);
2096 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2097 Log(("EMR3ExecuteVM: actually returns %Rrc (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2098 return VERR_EM_INTERNAL_ERROR;
2099 }
2100 } /* The Outer Main Loop */
2101 }
2102 else
2103 {
2104 /*
2105 * Fatal error.
2106 */
2107 Log(("EMR3ExecuteVM: returns %Rrc because of longjmp / fatal error; (state %s / %s)\n", rc, emR3GetStateName(pVCpu->em.s.enmState), emR3GetStateName(pVCpu->em.s.enmPrevState)));
2108 TMR3NotifySuspend(pVM, pVCpu);
2109 VMMR3FatalDump(pVM, pVCpu, rc);
2110 emR3Debug(pVM, pVCpu, rc);
2111 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatTotal, x);
2112 /** @todo change the VM state! */
2113 return rc;
2114 }
2115
2116 /* (won't ever get here). */
2117 AssertFailed();
2118}
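#if 0 /* Editor's illustration: the setjmp/longjmp fatal-error pattern used by
       * EMR3ExecuteVM above, reduced to generic C. All names in this block are
       * invented; pVCpu->em.s.u.FatalLongJump is the real jump buffer. */
#include <setjmp.h>

static jmp_buf g_FatalJmp;                  /* Stand-in for the EM jump buffer. */

static int runOuterLoopSketch(void)
{
    int rc = setjmp(g_FatalJmp);            /* 0 on the direct call...          */
    if (rc == 0)
    {
        for (;;)
        {
            /* ... schedule and execute; a deep callee hitting a fatal
             * condition does longjmp(g_FatalJmp, VERR_SOMETHING) ... */
        }
    }
    return rc;                              /* ...the longjmp value otherwise.  */
}
#endif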
2119
2120/**
2121 * Notify EM that the VM is being suspended (used by FTM).
2122 *
2123 * @param pVM VM Handle.
2124 */
2125VMMR3DECL(int) EMR3NotifySuspend(PVM pVM)
2126{
2127 PVMCPU pVCpu = VMMGetCpu(pVM);
2128
2129 TMR3NotifySuspend(pVM, pVCpu); /* Stop the virtual time. */
2130 pVCpu->em.s.enmPrevState = pVCpu->em.s.enmState;
2131 pVCpu->em.s.enmState = EMSTATE_SUSPENDED;
2132 return VINF_SUCCESS;
2133}
2134
2135/**
2136 * Notify EM that the VM is being resumed (used by FTM).
2137 *
2138 * @param pVM VM Handle.
2139 */
2140VMMR3DECL(int) EMR3NotifyResume(PVM pVM)
2141{
2142 PVMCPU pVCpu = VMMGetCpu(pVM);
2143 EMSTATE enmCurState = pVCpu->em.s.enmState;
2144
2145 TMR3NotifyResume(pVM, pVCpu); /* Resume the virtual time. */
2146 pVCpu->em.s.enmState = pVCpu->em.s.enmPrevState;
2147 pVCpu->em.s.enmPrevState = enmCurState;
2148 return VINF_SUCCESS;
2149}
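#if 0 /* Editor's illustration: the expected pairing of the two FTM
       * notification APIs above (hypothetical caller, not actual FTM code). */
    EMR3NotifySuspend(pVM);   /* Stops virtual time, saves enmState, enters EMSTATE_SUSPENDED. */
    /* ... fault-tolerance sync work that must not advance guest time ... */
    EMR3NotifyResume(pVM);    /* Restarts virtual time and restores the saved state. */
#endif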