VirtualBox

source: vbox/trunk/src/VBox/VMM/EM.cpp@ 9016

Last change on this file since 9016 was 8985, checked in by vboxsync, 17 years ago

Added an extra argument to TRPMForwardTrap so the trpm profiling started in the trap handler assembly code are stopped correctly. Enabled the #UD forwarding for ring-0 traps (dtrace experiment).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 140.6 KB
Line 
1/* $Id: EM.cpp 8985 2008-05-20 21:03:33Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor/Manager.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22
23/** @page pg_em EM - The Execution Monitor/Manager
24 *
25 * The Execution Monitor/Manager is responsible for running the VM, scheduling
26 * the right kind of execution (Raw, Recompiled, Interpreted,..), and keeping
27 * the CPU states in sync. The function EMR3ExecuteVM() is the 'main-loop' of
28 * the VM.
29 *
30 */
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#define LOG_GROUP LOG_GROUP_EM
36#include <VBox/em.h>
37#include <VBox/vmm.h>
38#include <VBox/patm.h>
39#include <VBox/csam.h>
40#include <VBox/selm.h>
41#include <VBox/trpm.h>
42#include <VBox/iom.h>
43#include <VBox/dbgf.h>
44#include <VBox/pgm.h>
45#include <VBox/rem.h>
46#include <VBox/tm.h>
47#include <VBox/mm.h>
48#include <VBox/ssm.h>
49#include <VBox/pdmapi.h>
50#include <VBox/pdmcritsect.h>
51#include <VBox/pdmqueue.h>
52#include <VBox/hwaccm.h>
53#include <VBox/patm.h>
54#include "EMInternal.h"
55#include <VBox/vm.h>
56#include <VBox/cpumdis.h>
57#include <VBox/dis.h>
58#include <VBox/disopcode.h>
59#include <VBox/dbgf.h>
60
61#include <VBox/log.h>
62#include <iprt/thread.h>
63#include <iprt/assert.h>
64#include <iprt/asm.h>
65#include <iprt/semaphore.h>
66#include <iprt/string.h>
67#include <iprt/avl.h>
68#include <iprt/stream.h>
69#include <VBox/param.h>
70#include <VBox/err.h>
71
72
73/*******************************************************************************
74* Internal Functions *
75*******************************************************************************/
76static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM);
77static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version);
78static int emR3Debug(PVM pVM, int rc);
79static int emR3RemStep(PVM pVM);
80static int emR3RemExecute(PVM pVM, bool *pfFFDone);
81static int emR3RawResumeHyper(PVM pVM);
82static int emR3RawStep(PVM pVM);
83DECLINLINE(int) emR3RawHandleRC(PVM pVM, PCPUMCTX pCtx, int rc);
84DECLINLINE(int) emR3RawUpdateForceFlag(PVM pVM, PCPUMCTX pCtx, int rc);
85static int emR3RawForcedActions(PVM pVM, PCPUMCTX pCtx);
86static int emR3RawExecute(PVM pVM, bool *pfFFDone);
87DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, const char *pszPrefix, int rcGC = VINF_SUCCESS);
88static int emR3HighPriorityPostForcedActions(PVM pVM, int rc);
89static int emR3ForcedActions(PVM pVM, int rc);
90static int emR3RawGuestTrap(PVM pVM);
91
92
93/**
94 * Initializes the EM.
95 *
96 * @returns VBox status code.
97 * @param pVM The VM to operate on.
98 */
99EMR3DECL(int) EMR3Init(PVM pVM)
100{
101 LogFlow(("EMR3Init\n"));
102 /*
103 * Assert alignment and sizes.
104 */
105 AssertRelease(!(RT_OFFSETOF(VM, em.s) & 31));
106 AssertRelease(sizeof(pVM->em.s) <= sizeof(pVM->em.padding));
107 AssertReleaseMsg(sizeof(pVM->em.s.u.FatalLongJump) <= sizeof(pVM->em.s.u.achPaddingFatalLongJump),
108 ("%d bytes, padding %d\n", sizeof(pVM->em.s.u.FatalLongJump), sizeof(pVM->em.s.u.achPaddingFatalLongJump)));
109
110 /*
111 * Init the structure.
112 */
113 pVM->em.s.offVM = RT_OFFSETOF(VM, em.s);
114 int rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR3Enabled", &pVM->fRawR3Enabled);
115 if (VBOX_FAILURE(rc))
116 pVM->fRawR3Enabled = true;
117 rc = CFGMR3QueryBool(CFGMR3GetRoot(pVM), "RawR0Enabled", &pVM->fRawR0Enabled);
118 if (VBOX_FAILURE(rc))
119 pVM->fRawR0Enabled = true;
120 Log(("EMR3Init: fRawR3Enabled=%d fRawR0Enabled=%d\n", pVM->fRawR3Enabled, pVM->fRawR0Enabled));
121 pVM->em.s.enmState = EMSTATE_NONE;
122 pVM->em.s.fForceRAW = false;
123
124 rc = CPUMQueryGuestCtxPtr(pVM, &pVM->em.s.pCtx);
125 AssertMsgRC(rc, ("CPUMQueryGuestCtxPtr -> %Vrc\n", rc));
126 pVM->em.s.pPatmGCState = PATMR3QueryGCStateHC(pVM);
127 AssertMsg(pVM->em.s.pPatmGCState, ("PATMR3QueryGCStateHC failed!\n"));
128
129 /*
130 * Saved state.
131 */
132 rc = SSMR3RegisterInternal(pVM, "em", 0, EM_SAVED_STATE_VERSION, 16,
133 NULL, emR3Save, NULL,
134 NULL, emR3Load, NULL);
135 if (VBOX_FAILURE(rc))
136 return rc;
137
138 /*
139 * Statistics.
140 */
141#ifdef VBOX_WITH_STATISTICS
142 PEMSTATS pStats;
143 rc = MMHyperAlloc(pVM, sizeof(*pStats), 0, MM_TAG_EM, (void **)&pStats);
144 if (VBOX_FAILURE(rc))
145 return rc;
146 pVM->em.s.pStatsHC = pStats;
147 pVM->em.s.pStatsGC = MMHyperHC2GC(pVM, pStats);
148
149 STAM_REG(pVM, &pStats->StatGCEmulate, STAMTYPE_PROFILE, "/EM/GC/Interpret", STAMUNIT_TICKS_PER_CALL, "Profiling of EMInterpretInstruction.");
150 STAM_REG(pVM, &pStats->StatHCEmulate, STAMTYPE_PROFILE, "/EM/HC/Interpret", STAMUNIT_TICKS_PER_CALL, "Profiling of EMInterpretInstruction.");
151
152 STAM_REG(pVM, &pStats->StatGCInterpretSucceeded, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success", STAMUNIT_OCCURENCES, "The number of times an instruction was successfully interpreted.");
153 STAM_REG(pVM, &pStats->StatHCInterpretSucceeded, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success", STAMUNIT_OCCURENCES, "The number of times an instruction was successfully interpreted.");
154
155 STAM_REG_USED(pVM, &pStats->StatGCAnd, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/And", STAMUNIT_OCCURENCES, "The number of times AND was successfully interpreted.");
156 STAM_REG_USED(pVM, &pStats->StatHCAnd, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/And", STAMUNIT_OCCURENCES, "The number of times AND was successfully interpreted.");
157 STAM_REG_USED(pVM, &pStats->StatGCAdd, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Add", STAMUNIT_OCCURENCES, "The number of times ADD was successfully interpreted.");
158 STAM_REG_USED(pVM, &pStats->StatHCAdd, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Add", STAMUNIT_OCCURENCES, "The number of times ADD was successfully interpreted.");
159 STAM_REG_USED(pVM, &pStats->StatGCAdc, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Adc", STAMUNIT_OCCURENCES, "The number of times ADC was successfully interpreted.");
160 STAM_REG_USED(pVM, &pStats->StatHCAdc, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Adc", STAMUNIT_OCCURENCES, "The number of times ADC was successfully interpreted.");
161 STAM_REG_USED(pVM, &pStats->StatGCSub, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Sub", STAMUNIT_OCCURENCES, "The number of times SUB was successfully interpreted.");
162 STAM_REG_USED(pVM, &pStats->StatHCSub, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Sub", STAMUNIT_OCCURENCES, "The number of times SUB was successfully interpreted.");
163 STAM_REG_USED(pVM, &pStats->StatGCCpuId, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/CpuId", STAMUNIT_OCCURENCES, "The number of times CPUID was successfully interpreted.");
164 STAM_REG_USED(pVM, &pStats->StatHCCpuId, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/CpuId", STAMUNIT_OCCURENCES, "The number of times CPUID was successfully interpreted.");
165 STAM_REG_USED(pVM, &pStats->StatGCDec, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Dec", STAMUNIT_OCCURENCES, "The number of times DEC was successfully interpreted.");
166 STAM_REG_USED(pVM, &pStats->StatHCDec, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Dec", STAMUNIT_OCCURENCES, "The number of times DEC was successfully interpreted.");
167 STAM_REG_USED(pVM, &pStats->StatGCHlt, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Hlt", STAMUNIT_OCCURENCES, "The number of times HLT was successfully interpreted.");
168 STAM_REG_USED(pVM, &pStats->StatHCHlt, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Hlt", STAMUNIT_OCCURENCES, "The number of times HLT was successfully interpreted.");
169 STAM_REG_USED(pVM, &pStats->StatGCInc, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Inc", STAMUNIT_OCCURENCES, "The number of times INC was successfully interpreted.");
170 STAM_REG_USED(pVM, &pStats->StatHCInc, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Inc", STAMUNIT_OCCURENCES, "The number of times INC was successfully interpreted.");
171 STAM_REG_USED(pVM, &pStats->StatGCInvlPg, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Invlpg", STAMUNIT_OCCURENCES, "The number of times INVLPG was successfully interpreted.");
172 STAM_REG_USED(pVM, &pStats->StatHCInvlPg, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Invlpg", STAMUNIT_OCCURENCES, "The number of times INVLPG was successfully interpreted.");
173 STAM_REG_USED(pVM, &pStats->StatGCIret, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Iret", STAMUNIT_OCCURENCES, "The number of times IRET was successfully interpreted.");
174 STAM_REG_USED(pVM, &pStats->StatHCIret, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Iret", STAMUNIT_OCCURENCES, "The number of times IRET was successfully interpreted.");
175 STAM_REG_USED(pVM, &pStats->StatGCLLdt, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/LLdt", STAMUNIT_OCCURENCES, "The number of times LLDT was successfully interpreted.");
176 STAM_REG_USED(pVM, &pStats->StatHCLLdt, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/LLdt", STAMUNIT_OCCURENCES, "The number of times LLDT was successfully interpreted.");
177 STAM_REG_USED(pVM, &pStats->StatGCMov, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Mov", STAMUNIT_OCCURENCES, "The number of times MOV was successfully interpreted.");
178 STAM_REG_USED(pVM, &pStats->StatHCMov, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Mov", STAMUNIT_OCCURENCES, "The number of times MOV was successfully interpreted.");
179 STAM_REG_USED(pVM, &pStats->StatGCMovCRx, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/MovCRx", STAMUNIT_OCCURENCES, "The number of times MOV CRx was successfully interpreted.");
180 STAM_REG_USED(pVM, &pStats->StatHCMovCRx, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/MovCRx", STAMUNIT_OCCURENCES, "The number of times MOV CRx was successfully interpreted.");
181 STAM_REG_USED(pVM, &pStats->StatGCMovDRx, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/MovDRx", STAMUNIT_OCCURENCES, "The number of times MOV DRx was successfully interpreted.");
182 STAM_REG_USED(pVM, &pStats->StatHCMovDRx, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/MovDRx", STAMUNIT_OCCURENCES, "The number of times MOV DRx was successfully interpreted.");
183 STAM_REG_USED(pVM, &pStats->StatGCOr, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Or", STAMUNIT_OCCURENCES, "The number of times OR was successfully interpreted.");
184 STAM_REG_USED(pVM, &pStats->StatHCOr, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Or", STAMUNIT_OCCURENCES, "The number of times OR was successfully interpreted.");
185 STAM_REG_USED(pVM, &pStats->StatGCPop, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Pop", STAMUNIT_OCCURENCES, "The number of times POP was successfully interpreted.");
186 STAM_REG_USED(pVM, &pStats->StatHCPop, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Pop", STAMUNIT_OCCURENCES, "The number of times POP was successfully interpreted.");
187 STAM_REG_USED(pVM, &pStats->StatGCRdtsc, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Rdtsc", STAMUNIT_OCCURENCES, "The number of times RDTSC was successfully interpreted.");
188 STAM_REG_USED(pVM, &pStats->StatHCRdtsc, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Rdtsc", STAMUNIT_OCCURENCES, "The number of times RDTSC was successfully interpreted.");
189 STAM_REG_USED(pVM, &pStats->StatGCSti, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Sti", STAMUNIT_OCCURENCES, "The number of times STI was successfully interpreted.");
190 STAM_REG_USED(pVM, &pStats->StatHCSti, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Sti", STAMUNIT_OCCURENCES, "The number of times STI was successfully interpreted.");
191 STAM_REG_USED(pVM, &pStats->StatGCXchg, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Xchg", STAMUNIT_OCCURENCES, "The number of times XCHG was successfully interpreted.");
192 STAM_REG_USED(pVM, &pStats->StatHCXchg, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Xchg", STAMUNIT_OCCURENCES, "The number of times XCHG was successfully interpreted.");
193 STAM_REG_USED(pVM, &pStats->StatGCXor, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Xor", STAMUNIT_OCCURENCES, "The number of times XOR was successfully interpreted.");
194 STAM_REG_USED(pVM, &pStats->StatHCXor, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Xor", STAMUNIT_OCCURENCES, "The number of times XOR was successfully interpreted.");
195 STAM_REG_USED(pVM, &pStats->StatGCMonitor, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Monitor", STAMUNIT_OCCURENCES, "The number of times MONITOR was successfully interpreted.");
196 STAM_REG_USED(pVM, &pStats->StatHCMonitor, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Monitor", STAMUNIT_OCCURENCES, "The number of times MONITOR was successfully interpreted.");
197 STAM_REG_USED(pVM, &pStats->StatGCMWait, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/MWait", STAMUNIT_OCCURENCES, "The number of times MWAIT was successfully interpreted.");
198 STAM_REG_USED(pVM, &pStats->StatHCMWait, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/MWait", STAMUNIT_OCCURENCES, "The number of times MWAIT was successfully interpreted.");
199 STAM_REG_USED(pVM, &pStats->StatGCBtr, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Btr", STAMUNIT_OCCURENCES, "The number of times BTR was successfully interpreted.");
200 STAM_REG_USED(pVM, &pStats->StatHCBtr, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Btr", STAMUNIT_OCCURENCES, "The number of times BTR was successfully interpreted.");
201 STAM_REG_USED(pVM, &pStats->StatGCBts, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Bts", STAMUNIT_OCCURENCES, "The number of times BTS was successfully interpreted.");
202 STAM_REG_USED(pVM, &pStats->StatHCBts, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Bts", STAMUNIT_OCCURENCES, "The number of times BTS was successfully interpreted.");
203 STAM_REG_USED(pVM, &pStats->StatGCBtc, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/Btc", STAMUNIT_OCCURENCES, "The number of times BTC was successfully interpreted.");
204 STAM_REG_USED(pVM, &pStats->StatHCBtc, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/Btc", STAMUNIT_OCCURENCES, "The number of times BTC was successfully interpreted.");
205 STAM_REG_USED(pVM, &pStats->StatGCCmpXchg, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/CmpXchg", STAMUNIT_OCCURENCES, "The number of times CMPXCHG was successfully interpreted.");
206 STAM_REG_USED(pVM, &pStats->StatHCCmpXchg, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/CmpXchg", STAMUNIT_OCCURENCES, "The number of times CMPXCHG was successfully interpreted.");
207 STAM_REG_USED(pVM, &pStats->StatGCCmpXchg8b, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/CmpXchg8b", STAMUNIT_OCCURENCES, "The number of times CMPXCHG8B was successfully interpreted.");
208 STAM_REG_USED(pVM, &pStats->StatHCCmpXchg8b, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/CmpXchg8b", STAMUNIT_OCCURENCES, "The number of times CMPXCHG8B was successfully interpreted.");
209 STAM_REG_USED(pVM, &pStats->StatGCXAdd, STAMTYPE_COUNTER, "/EM/GC/Interpret/Success/XAdd", STAMUNIT_OCCURENCES, "The number of times XADD was successfully interpreted.");
210 STAM_REG_USED(pVM, &pStats->StatHCXAdd, STAMTYPE_COUNTER, "/EM/HC/Interpret/Success/XAdd", STAMUNIT_OCCURENCES, "The number of times XADD was successfully interpreted.");
211
212 STAM_REG(pVM, &pStats->StatGCInterpretFailed, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed", STAMUNIT_OCCURENCES, "The number of times an instruction was not interpreted.");
213 STAM_REG(pVM, &pStats->StatHCInterpretFailed, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed", STAMUNIT_OCCURENCES, "The number of times an instruction was not interpreted.");
214
215 STAM_REG_USED(pVM, &pStats->StatGCFailedAnd, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/And", STAMUNIT_OCCURENCES, "The number of times AND was not interpreted.");
216 STAM_REG_USED(pVM, &pStats->StatHCFailedAnd, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/And", STAMUNIT_OCCURENCES, "The number of times AND was not interpreted.");
217 STAM_REG_USED(pVM, &pStats->StatGCFailedCpuId, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/CpuId", STAMUNIT_OCCURENCES, "The number of times CPUID was not interpreted.");
218 STAM_REG_USED(pVM, &pStats->StatHCFailedCpuId, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/CpuId", STAMUNIT_OCCURENCES, "The number of times CPUID was not interpreted.");
219 STAM_REG_USED(pVM, &pStats->StatGCFailedDec, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Dec", STAMUNIT_OCCURENCES, "The number of times DEC was not interpreted.");
220 STAM_REG_USED(pVM, &pStats->StatHCFailedDec, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Dec", STAMUNIT_OCCURENCES, "The number of times DEC was not interpreted.");
221 STAM_REG_USED(pVM, &pStats->StatGCFailedHlt, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Hlt", STAMUNIT_OCCURENCES, "The number of times HLT was not interpreted.");
222 STAM_REG_USED(pVM, &pStats->StatHCFailedHlt, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Hlt", STAMUNIT_OCCURENCES, "The number of times HLT was not interpreted.");
223 STAM_REG_USED(pVM, &pStats->StatGCFailedInc, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Inc", STAMUNIT_OCCURENCES, "The number of times INC was not interpreted.");
224 STAM_REG_USED(pVM, &pStats->StatHCFailedInc, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Inc", STAMUNIT_OCCURENCES, "The number of times INC was not interpreted.");
225 STAM_REG_USED(pVM, &pStats->StatGCFailedInvlPg, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/InvlPg", STAMUNIT_OCCURENCES, "The number of times INVLPG was not interpreted.");
226 STAM_REG_USED(pVM, &pStats->StatHCFailedInvlPg, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/InvlPg", STAMUNIT_OCCURENCES, "The number of times INVLPG was not interpreted.");
227 STAM_REG_USED(pVM, &pStats->StatGCFailedIret, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Iret", STAMUNIT_OCCURENCES, "The number of times IRET was not interpreted.");
228 STAM_REG_USED(pVM, &pStats->StatHCFailedIret, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Iret", STAMUNIT_OCCURENCES, "The number of times IRET was not interpreted.");
229 STAM_REG_USED(pVM, &pStats->StatGCFailedLLdt, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/LLdt", STAMUNIT_OCCURENCES, "The number of times LLDT was not interpreted.");
230 STAM_REG_USED(pVM, &pStats->StatHCFailedLLdt, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/LLdt", STAMUNIT_OCCURENCES, "The number of times LLDT was not interpreted.");
231 STAM_REG_USED(pVM, &pStats->StatGCFailedMov, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Mov", STAMUNIT_OCCURENCES, "The number of times MOV was not interpreted.");
232 STAM_REG_USED(pVM, &pStats->StatHCFailedMov, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Mov", STAMUNIT_OCCURENCES, "The number of times MOV was not interpreted.");
233 STAM_REG_USED(pVM, &pStats->StatGCFailedMovCRx, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/MovCRx", STAMUNIT_OCCURENCES, "The number of times MOV CRx was not interpreted.");
234 STAM_REG_USED(pVM, &pStats->StatHCFailedMovCRx, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/MovCRx", STAMUNIT_OCCURENCES, "The number of times MOV CRx was not interpreted.");
235 STAM_REG_USED(pVM, &pStats->StatGCFailedMovDRx, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/MovDRx", STAMUNIT_OCCURENCES, "The number of times MOV DRx was not interpreted.");
236 STAM_REG_USED(pVM, &pStats->StatHCFailedMovDRx, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/MovDRx", STAMUNIT_OCCURENCES, "The number of times MOV DRx was not interpreted.");
237 STAM_REG_USED(pVM, &pStats->StatGCFailedOr, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Or", STAMUNIT_OCCURENCES, "The number of times OR was not interpreted.");
238 STAM_REG_USED(pVM, &pStats->StatHCFailedOr, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Or", STAMUNIT_OCCURENCES, "The number of times OR was not interpreted.");
239 STAM_REG_USED(pVM, &pStats->StatGCFailedPop, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Pop", STAMUNIT_OCCURENCES, "The number of times POP was not interpreted.");
240 STAM_REG_USED(pVM, &pStats->StatHCFailedPop, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Pop", STAMUNIT_OCCURENCES, "The number of times POP was not interpreted.");
241 STAM_REG_USED(pVM, &pStats->StatGCFailedSti, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Sti", STAMUNIT_OCCURENCES, "The number of times STI was not interpreted.");
242 STAM_REG_USED(pVM, &pStats->StatHCFailedSti, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Sti", STAMUNIT_OCCURENCES, "The number of times STI was not interpreted.");
243 STAM_REG_USED(pVM, &pStats->StatGCFailedXchg, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Xchg", STAMUNIT_OCCURENCES, "The number of times XCHG was not interpreted.");
244 STAM_REG_USED(pVM, &pStats->StatHCFailedXchg, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Xchg", STAMUNIT_OCCURENCES, "The number of times XCHG was not interpreted.");
245 STAM_REG_USED(pVM, &pStats->StatGCFailedXor, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Xor", STAMUNIT_OCCURENCES, "The number of times XOR was not interpreted.");
246 STAM_REG_USED(pVM, &pStats->StatHCFailedXor, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Xor", STAMUNIT_OCCURENCES, "The number of times XOR was not interpreted.");
247 STAM_REG_USED(pVM, &pStats->StatGCFailedMonitor, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Monitor", STAMUNIT_OCCURENCES, "The number of times MONITOR was not interpreted.");
248 STAM_REG_USED(pVM, &pStats->StatHCFailedMonitor, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Monitor", STAMUNIT_OCCURENCES, "The number of times MONITOR was not interpreted.");
249 STAM_REG_USED(pVM, &pStats->StatGCFailedMWait, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/MWait", STAMUNIT_OCCURENCES, "The number of times MONITOR was not interpreted.");
250 STAM_REG_USED(pVM, &pStats->StatHCFailedMWait, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/MWait", STAMUNIT_OCCURENCES, "The number of times MONITOR was not interpreted.");
251 STAM_REG_USED(pVM, &pStats->StatGCFailedRdtsc, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Rdtsc", STAMUNIT_OCCURENCES, "The number of times RDTSC was not interpreted.");
252 STAM_REG_USED(pVM, &pStats->StatHCFailedRdtsc, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Rdtsc", STAMUNIT_OCCURENCES, "The number of times RDTSC was not interpreted.");
253
254 STAM_REG_USED(pVM, &pStats->StatGCFailedMisc, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Misc", STAMUNIT_OCCURENCES, "The number of times some misc instruction was encountered.");
255 STAM_REG_USED(pVM, &pStats->StatHCFailedMisc, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Misc", STAMUNIT_OCCURENCES, "The number of times some misc instruction was encountered.");
256 STAM_REG_USED(pVM, &pStats->StatGCFailedAdd, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Add", STAMUNIT_OCCURENCES, "The number of times ADD was not interpreted.");
257 STAM_REG_USED(pVM, &pStats->StatHCFailedAdd, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Add", STAMUNIT_OCCURENCES, "The number of times ADD was not interpreted.");
258 STAM_REG_USED(pVM, &pStats->StatGCFailedAdc, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Adc", STAMUNIT_OCCURENCES, "The number of times ADC was not interpreted.");
259 STAM_REG_USED(pVM, &pStats->StatHCFailedAdc, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Adc", STAMUNIT_OCCURENCES, "The number of times ADC was not interpreted.");
260 STAM_REG_USED(pVM, &pStats->StatGCFailedBtr, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Btr", STAMUNIT_OCCURENCES, "The number of times BTR was not interpreted.");
261 STAM_REG_USED(pVM, &pStats->StatHCFailedBtr, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Btr", STAMUNIT_OCCURENCES, "The number of times BTR was not interpreted.");
262 STAM_REG_USED(pVM, &pStats->StatGCFailedBts, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Bts", STAMUNIT_OCCURENCES, "The number of times BTS was not interpreted.");
263 STAM_REG_USED(pVM, &pStats->StatHCFailedBts, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Bts", STAMUNIT_OCCURENCES, "The number of times BTS was not interpreted.");
264 STAM_REG_USED(pVM, &pStats->StatGCFailedBtc, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Btc", STAMUNIT_OCCURENCES, "The number of times BTC was not interpreted.");
265 STAM_REG_USED(pVM, &pStats->StatHCFailedBtc, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Btc", STAMUNIT_OCCURENCES, "The number of times BTC was not interpreted.");
266 STAM_REG_USED(pVM, &pStats->StatGCFailedCli, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Cli", STAMUNIT_OCCURENCES, "The number of times CLI was not interpreted.");
267 STAM_REG_USED(pVM, &pStats->StatHCFailedCli, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Cli", STAMUNIT_OCCURENCES, "The number of times CLI was not interpreted.");
268 STAM_REG_USED(pVM, &pStats->StatGCFailedCmpXchg, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/CmpXchg", STAMUNIT_OCCURENCES, "The number of times CMPXCHG was not interpreted.");
269 STAM_REG_USED(pVM, &pStats->StatHCFailedCmpXchg, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/CmpXchg", STAMUNIT_OCCURENCES, "The number of times CMPXCHG was not interpreted.");
270 STAM_REG_USED(pVM, &pStats->StatGCFailedCmpXchg8b, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/CmpXchg8b", STAMUNIT_OCCURENCES, "The number of times CMPXCHG8B was not interpreted.");
271 STAM_REG_USED(pVM, &pStats->StatHCFailedCmpXchg8b, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/CmpXchg8b", STAMUNIT_OCCURENCES, "The number of times CMPXCHG8B was not interpreted.");
272 STAM_REG_USED(pVM, &pStats->StatGCFailedXAdd, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/XAdd", STAMUNIT_OCCURENCES, "The number of times XADD was not interpreted.");
273 STAM_REG_USED(pVM, &pStats->StatHCFailedXAdd, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/XAdd", STAMUNIT_OCCURENCES, "The number of times XADD was not interpreted.");
274 STAM_REG_USED(pVM, &pStats->StatGCFailedMovNTPS, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/MovNTPS", STAMUNIT_OCCURENCES, "The number of times MOVNTPS was not interpreted.");
275 STAM_REG_USED(pVM, &pStats->StatHCFailedMovNTPS, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/MovNTPS", STAMUNIT_OCCURENCES, "The number of times MOVNTPS was not interpreted.");
276 STAM_REG_USED(pVM, &pStats->StatGCFailedStosWD, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/StosWD", STAMUNIT_OCCURENCES, "The number of times STOSWD was not interpreted.");
277 STAM_REG_USED(pVM, &pStats->StatHCFailedStosWD, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/StosWD", STAMUNIT_OCCURENCES, "The number of times STOSWD was not interpreted.");
278 STAM_REG_USED(pVM, &pStats->StatGCFailedSub, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Sub", STAMUNIT_OCCURENCES, "The number of times SUB was not interpreted.");
279 STAM_REG_USED(pVM, &pStats->StatHCFailedSub, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Sub", STAMUNIT_OCCURENCES, "The number of times SUB was not interpreted.");
280 STAM_REG_USED(pVM, &pStats->StatGCFailedWbInvd, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/WbInvd", STAMUNIT_OCCURENCES, "The number of times WBINVD was not interpreted.");
281 STAM_REG_USED(pVM, &pStats->StatHCFailedWbInvd, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/WbInvd", STAMUNIT_OCCURENCES, "The number of times WBINVD was not interpreted.");
282
283 STAM_REG_USED(pVM, &pStats->StatGCFailedUserMode, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/UserMode", STAMUNIT_OCCURENCES, "The number of rejections because of CPL.");
284 STAM_REG_USED(pVM, &pStats->StatHCFailedUserMode, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/UserMode", STAMUNIT_OCCURENCES, "The number of rejections because of CPL.");
285 STAM_REG_USED(pVM, &pStats->StatGCFailedPrefix, STAMTYPE_COUNTER, "/EM/GC/Interpret/Failed/Prefix", STAMUNIT_OCCURENCES, "The number of rejections because of prefix .");
286 STAM_REG_USED(pVM, &pStats->StatHCFailedPrefix, STAMTYPE_COUNTER, "/EM/HC/Interpret/Failed/Prefix", STAMUNIT_OCCURENCES, "The number of rejections because of prefix .");
287
288 STAM_REG_USED(pVM, &pStats->StatCli, STAMTYPE_COUNTER, "/EM/HC/PrivInst/Cli", STAMUNIT_OCCURENCES, "Number of cli instructions.");
289 STAM_REG_USED(pVM, &pStats->StatSti, STAMTYPE_COUNTER, "/EM/HC/PrivInst/Sti", STAMUNIT_OCCURENCES, "Number of sli instructions.");
290 STAM_REG_USED(pVM, &pStats->StatIn, STAMTYPE_COUNTER, "/EM/HC/PrivInst/In", STAMUNIT_OCCURENCES, "Number of in instructions.");
291 STAM_REG_USED(pVM, &pStats->StatOut, STAMTYPE_COUNTER, "/EM/HC/PrivInst/Out", STAMUNIT_OCCURENCES, "Number of out instructions.");
292 STAM_REG_USED(pVM, &pStats->StatHlt, STAMTYPE_COUNTER, "/EM/HC/PrivInst/Hlt", STAMUNIT_OCCURENCES, "Number of hlt instructions not handled in GC because of PATM.");
293 STAM_REG_USED(pVM, &pStats->StatInvlpg, STAMTYPE_COUNTER, "/EM/HC/PrivInst/Invlpg", STAMUNIT_OCCURENCES, "Number of invlpg instructions.");
294 STAM_REG_USED(pVM, &pStats->StatMisc, STAMTYPE_COUNTER, "/EM/HC/PrivInst/Misc", STAMUNIT_OCCURENCES, "Number of misc. instructions.");
295 STAM_REG_USED(pVM, &pStats->StatMovWriteCR[0], STAMTYPE_COUNTER, "/EM/HC/PrivInst/Mov CR0, X", STAMUNIT_OCCURENCES, "Number of mov CR0 read instructions.");
296 STAM_REG_USED(pVM, &pStats->StatMovWriteCR[1], STAMTYPE_COUNTER, "/EM/HC/PrivInst/Mov CR1, X", STAMUNIT_OCCURENCES, "Number of mov CR1 read instructions.");
297 STAM_REG_USED(pVM, &pStats->StatMovWriteCR[2], STAMTYPE_COUNTER, "/EM/HC/PrivInst/Mov CR2, X", STAMUNIT_OCCURENCES, "Number of mov CR2 read instructions.");
298 STAM_REG_USED(pVM, &pStats->StatMovWriteCR[3], STAMTYPE_COUNTER, "/EM/HC/PrivInst/Mov CR3, X", STAMUNIT_OCCURENCES, "Number of mov CR3 read instructions.");
299 STAM_REG_USED(pVM, &pStats->StatMovWriteCR[4], STAMTYPE_COUNTER, "/EM/HC/PrivInst/Mov CR4, X", STAMUNIT_OCCURENCES, "Number of mov CR4 read instructions.");
300 STAM_REG_USED(pVM, &pStats->StatMovReadCR[0], STAMTYPE_COUNTER, "/EM/HC/PrivInst/Mov X, CR0", STAMUNIT_OCCURENCES, "Number of mov CR0 write instructions.");
301 STAM_REG_USED(pVM, &pStats->StatMovReadCR[1], STAMTYPE_COUNTER, "/EM/HC/PrivInst/Mov X, CR1", STAMUNIT_OCCURENCES, "Number of mov CR1 write instructions.");
302 STAM_REG_USED(pVM, &pStats->StatMovReadCR[2], STAMTYPE_COUNTER, "/EM/HC/PrivInst/Mov X, CR2", STAMUNIT_OCCURENCES, "Number of mov CR2 write instructions.");
303 STAM_REG_USED(pVM, &pStats->StatMovReadCR[3], STAMTYPE_COUNTER, "/EM/HC/PrivInst/Mov X, CR3", STAMUNIT_OCCURENCES, "Number of mov CR3 write instructions.");
304 STAM_REG_USED(pVM, &pStats->StatMovReadCR[4], STAMTYPE_COUNTER, "/EM/HC/PrivInst/Mov X, CR4", STAMUNIT_OCCURENCES, "Number of mov CR4 write instructions.");
305 STAM_REG_USED(pVM, &pStats->StatMovDRx, STAMTYPE_COUNTER, "/EM/HC/PrivInst/MovDRx", STAMUNIT_OCCURENCES, "Number of mov DRx instructions.");
306 STAM_REG_USED(pVM, &pStats->StatIret, STAMTYPE_COUNTER, "/EM/HC/PrivInst/Iret", STAMUNIT_OCCURENCES, "Number of iret instructions.");
307 STAM_REG_USED(pVM, &pStats->StatMovLgdt, STAMTYPE_COUNTER, "/EM/HC/PrivInst/Lgdt", STAMUNIT_OCCURENCES, "Number of lgdt instructions.");
308 STAM_REG_USED(pVM, &pStats->StatMovLidt, STAMTYPE_COUNTER, "/EM/HC/PrivInst/Lidt", STAMUNIT_OCCURENCES, "Number of lidt instructions.");
309 STAM_REG_USED(pVM, &pStats->StatMovLldt, STAMTYPE_COUNTER, "/EM/HC/PrivInst/Lldt", STAMUNIT_OCCURENCES, "Number of lldt instructions.");
310 STAM_REG_USED(pVM, &pStats->StatSysEnter, STAMTYPE_COUNTER, "/EM/HC/PrivInst/Sysenter", STAMUNIT_OCCURENCES, "Number of sysenter instructions.");
311 STAM_REG_USED(pVM, &pStats->StatSysExit, STAMTYPE_COUNTER, "/EM/HC/PrivInst/Sysexit", STAMUNIT_OCCURENCES, "Number of sysexit instructions.");
312 STAM_REG_USED(pVM, &pStats->StatSysCall, STAMTYPE_COUNTER, "/EM/HC/PrivInst/Syscall", STAMUNIT_OCCURENCES, "Number of syscall instructions.");
313 STAM_REG_USED(pVM, &pStats->StatSysRet, STAMTYPE_COUNTER, "/EM/HC/PrivInst/Sysret", STAMUNIT_OCCURENCES, "Number of sysret instructions.");
314
315 STAM_REG(pVM, &pVM->em.s.StatTotalClis, STAMTYPE_COUNTER, "/EM/Cli/Total", STAMUNIT_OCCURENCES, "Total number of cli instructions executed.");
316 pVM->em.s.pCliStatTree = 0;
317#endif /* VBOX_WITH_STATISTICS */
318
319/* these should be considered for release statistics. */
320 STAM_REG(pVM, &pVM->em.s.StatForcedActions, STAMTYPE_PROFILE, "/PROF/EM/ForcedActions", STAMUNIT_TICKS_PER_CALL, "Profiling forced action execution.");
321 STAM_REL_REG(pVM, &pVM->em.s.StatHalted, STAMTYPE_PROFILE, "/PROF/EM/Halted", STAMUNIT_TICKS_PER_CALL, "Profiling halted state (VMR3WaitHalted).");
322 STAM_REG(pVM, &pVM->em.s.StatHwAccEntry, STAMTYPE_PROFILE, "/PROF/EM/HwAccEnter", STAMUNIT_TICKS_PER_CALL, "Profiling Hardware Accelerated Mode entry overhead.");
323 STAM_REG(pVM, &pVM->em.s.StatHwAccExec, STAMTYPE_PROFILE, "/PROF/EM/HwAccExec", STAMUNIT_TICKS_PER_CALL, "Profiling Hardware Accelerated Mode execution.");
324 STAM_REG(pVM, &pVM->em.s.StatIOEmu, STAMTYPE_PROFILE, "/PROF/EM/Emulation/IO", STAMUNIT_TICKS_PER_CALL, "Profiling of emR3RawExecuteIOInstruction.");
325 STAM_REG(pVM, &pVM->em.s.StatPrivEmu, STAMTYPE_PROFILE, "/PROF/EM/Emulation/Priv", STAMUNIT_TICKS_PER_CALL, "Profiling of emR3RawPrivileged.");
326 STAM_REG(pVM, &pVM->em.s.StatMiscEmu, STAMTYPE_PROFILE, "/PROF/EM/Emulation/Misc", STAMUNIT_TICKS_PER_CALL, "Profiling of emR3RawExecuteInstruction.");
327 STAM_REG(pVM, &pVM->em.s.StatREMEmu, STAMTYPE_PROFILE, "/PROF/EM/REMEmuSingle", STAMUNIT_TICKS_PER_CALL, "Profiling single instruction REM execution.");
328 STAM_REG(pVM, &pVM->em.s.StatREMExec, STAMTYPE_PROFILE, "/PROF/EM/REMExec", STAMUNIT_TICKS_PER_CALL, "Profiling REM execution.");
329 STAM_REG(pVM, &pVM->em.s.StatREMSync, STAMTYPE_PROFILE, "/PROF/EM/REMSync", STAMUNIT_TICKS_PER_CALL, "Profiling REM context syncing.");
330 STAM_REG(pVM, &pVM->em.s.StatREMTotal, STAMTYPE_PROFILE, "/PROF/EM/REMTotal", STAMUNIT_TICKS_PER_CALL, "Profiling emR3RemExecute (excluding FFs).");
331 STAM_REG(pVM, &pVM->em.s.StatRAWEntry, STAMTYPE_PROFILE, "/PROF/EM/RAWEnter", STAMUNIT_TICKS_PER_CALL, "Profiling Raw Mode entry overhead.");
332 STAM_REG(pVM, &pVM->em.s.StatRAWExec, STAMTYPE_PROFILE, "/PROF/EM/RAWExec", STAMUNIT_TICKS_PER_CALL, "Profiling Raw Mode execution.");
333 STAM_REG(pVM, &pVM->em.s.StatRAWTail, STAMTYPE_PROFILE, "/PROF/EM/RAWTail", STAMUNIT_TICKS_PER_CALL, "Profiling Raw Mode tail overhead.");
334 STAM_REG(pVM, &pVM->em.s.StatRAWTotal, STAMTYPE_PROFILE, "/PROF/EM/RAWTotal", STAMUNIT_TICKS_PER_CALL, "Profiling emR3RawExecute (excluding FFs).");
335 STAM_REL_REG(pVM, &pVM->em.s.StatTotal, STAMTYPE_PROFILE_ADV, "/PROF/EM/Total", STAMUNIT_TICKS_PER_CALL, "Profiling EMR3ExecuteVM.");
336
337
338 return VINF_SUCCESS;
339}
340
341
342
343/**
344 * Applies relocations to data and code managed by this
345 * component. This function will be called at init and
346 * whenever the VMM need to relocate it self inside the GC.
347 *
348 * @param pVM The VM.
349 */
350EMR3DECL(void) EMR3Relocate(PVM pVM)
351{
352 LogFlow(("EMR3Relocate\n"));
353 if (pVM->em.s.pStatsHC)
354 pVM->em.s.pStatsGC = MMHyperHC2GC(pVM, pVM->em.s.pStatsHC);
355}
356
357
358/**
359 * Reset notification.
360 *
361 * @param pVM
362 */
363EMR3DECL(void) EMR3Reset(PVM pVM)
364{
365 LogFlow(("EMR3Reset: \n"));
366 pVM->em.s.fForceRAW = false;
367}
368
369
370/**
371 * Terminates the EM.
372 *
373 * Termination means cleaning up and freeing all resources,
374 * the VM it self is at this point powered off or suspended.
375 *
376 * @returns VBox status code.
377 * @param pVM The VM to operate on.
378 */
379EMR3DECL(int) EMR3Term(PVM pVM)
380{
381 AssertMsg(pVM->em.s.offVM, ("bad init order!\n"));
382
383 return VINF_SUCCESS;
384}
385
386
387/**
388 * Execute state save operation.
389 *
390 * @returns VBox status code.
391 * @param pVM VM Handle.
392 * @param pSSM SSM operation handle.
393 */
394static DECLCALLBACK(int) emR3Save(PVM pVM, PSSMHANDLE pSSM)
395{
396 return SSMR3PutBool(pSSM, pVM->em.s.fForceRAW);
397}
398
399
400/**
401 * Execute state load operation.
402 *
403 * @returns VBox status code.
404 * @param pVM VM Handle.
405 * @param pSSM SSM operation handle.
406 * @param u32Version Data layout version.
407 */
408static DECLCALLBACK(int) emR3Load(PVM pVM, PSSMHANDLE pSSM, uint32_t u32Version)
409{
410 /*
411 * Validate version.
412 */
413 if (u32Version != EM_SAVED_STATE_VERSION)
414 {
415 Log(("emR3Load: Invalid version u32Version=%d (current %d)!\n", u32Version, EM_SAVED_STATE_VERSION));
416 return VERR_SSM_UNSUPPORTED_DATA_UNIT_VERSION;
417 }
418
419 /*
420 * Load the saved state.
421 */
422 int rc = SSMR3GetBool(pSSM, &pVM->em.s.fForceRAW);
423 if (VBOX_FAILURE(rc))
424 pVM->em.s.fForceRAW = false;
425
426 Assert(pVM->em.s.pCliStatTree == 0);
427 return rc;
428}
429
430
431/**
432 * Enables or disables a set of raw-mode execution modes.
433 *
434 * @returns VINF_SUCCESS on success.
435 * @returns VINF_RESCHEDULE if a rescheduling might be required.
436 * @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
437 *
438 * @param pVM The VM to operate on.
439 * @param enmMode The execution mode change.
440 * @thread The emulation thread.
441 */
442EMR3DECL(int) EMR3RawSetMode(PVM pVM, EMRAWMODE enmMode)
443{
444 switch (enmMode)
445 {
446 case EMRAW_NONE:
447 pVM->fRawR3Enabled = false;
448 pVM->fRawR0Enabled = false;
449 break;
450 case EMRAW_RING3_ENABLE:
451 pVM->fRawR3Enabled = true;
452 break;
453 case EMRAW_RING3_DISABLE:
454 pVM->fRawR3Enabled = false;
455 break;
456 case EMRAW_RING0_ENABLE:
457 pVM->fRawR0Enabled = true;
458 break;
459 case EMRAW_RING0_DISABLE:
460 pVM->fRawR0Enabled = false;
461 break;
462 default:
463 AssertMsgFailed(("Invalid enmMode=%d\n", enmMode));
464 return VERR_INVALID_PARAMETER;
465 }
466 Log(("EMR3SetRawMode: fRawR3Enabled=%RTbool fRawR0Enabled=%RTbool pVM->fRawR3Enabled=%RTbool\n",
467 pVM->fRawR3Enabled, pVM->fRawR0Enabled, pVM->fRawR3Enabled));
468 return pVM->em.s.enmState == EMSTATE_RAW ? VINF_EM_RESCHEDULE : VINF_SUCCESS;
469}
470
471
472/**
473 * Raise a fatal error.
474 *
475 * Safely terminate the VM with full state report and stuff. This function
476 * will naturally never return.
477 *
478 * @param pVM VM handle.
479 * @param rc VBox status code.
480 */
481EMR3DECL(void) EMR3FatalError(PVM pVM, int rc)
482{
483 longjmp(pVM->em.s.u.FatalLongJump, rc);
484 AssertReleaseMsgFailed(("longjmp returned!\n"));
485}
486
487
488/**
489 * Gets the EM state name.
490 *
491 * @returns pointer to read only state name,
492 * @param enmState The state.
493 */
494EMR3DECL(const char *) EMR3GetStateName(EMSTATE enmState)
495{
496 switch (enmState)
497 {
498 case EMSTATE_NONE: return "EMSTATE_NONE";
499 case EMSTATE_RAW: return "EMSTATE_RAW";
500 case EMSTATE_HWACC: return "EMSTATE_HWACC";
501 case EMSTATE_REM: return "EMSTATE_REM";
502 case EMSTATE_HALTED: return "EMSTATE_HALTED";
503 case EMSTATE_SUSPENDED: return "EMSTATE_SUSPENDED";
504 case EMSTATE_TERMINATING: return "EMSTATE_TERMINATING";
505 case EMSTATE_DEBUG_GUEST_RAW: return "EMSTATE_DEBUG_GUEST_RAW";
506 case EMSTATE_DEBUG_GUEST_REM: return "EMSTATE_DEBUG_GUEST_REM";
507 case EMSTATE_DEBUG_HYPER: return "EMSTATE_DEBUG_HYPER";
508 case EMSTATE_GURU_MEDITATION: return "EMSTATE_GURU_MEDITATION";
509 default: return "Unknown!";
510 }
511}
512
513
#ifdef VBOX_WITH_STATISTICS
/**
 * Just a braindead function to keep track of cli addresses.
 *
 * Maintains a per-address counter in an AVL tree keyed by the instruction
 * address, registering a new STAM counter the first time an address is seen.
 *
 * @param   pVM         VM handle.
 * @param   pInstrGC    The EIP of the cli instruction.
 */
static void emR3RecordCli(PVM pVM, RTGCPTR pInstrGC)
{
    PCLISTAT pRec;

    pRec = (PCLISTAT)RTAvlPVGet(&pVM->em.s.pCliStatTree, (AVLPVKEY)pInstrGC);
    if (!pRec)
    {
        /* New cli instruction; insert into the tree. */
        pRec = (PCLISTAT)MMR3HeapAllocZ(pVM, MM_TAG_EM, sizeof(*pRec));
        Assert(pRec);
        if (!pRec)
            return; /* Best effort: out of heap just drops the sample. */
        pRec->Core.Key = (AVLPVKEY)pInstrGC;

        /* Register a dedicated counter named after the instruction address. */
        char szCliStatName[32];
        RTStrPrintf(szCliStatName, sizeof(szCliStatName), "/EM/Cli/0x%VGv", pInstrGC);
        STAM_REG(pVM, &pRec->Counter, STAMTYPE_COUNTER, szCliStatName, STAMUNIT_OCCURENCES, "Number of times cli was executed.");

        bool fRc = RTAvlPVInsert(&pVM->em.s.pCliStatTree, &pRec->Core);
        Assert(fRc); NOREF(fRc);
    }
    STAM_COUNTER_INC(&pRec->Counter);
    STAM_COUNTER_INC(&pVM->em.s.StatTotalClis);
}
#endif /* VBOX_WITH_STATISTICS */
545
546
547/**
548 * Debug loop.
549 *
550 * @returns VBox status code for EM.
551 * @param pVM VM handle.
552 * @param rc Current EM VBox status code..
553 */
554static int emR3Debug(PVM pVM, int rc)
555{
556 for (;;)
557 {
558 Log(("emR3Debug: rc=%Vrc\n", rc));
559 const int rcLast = rc;
560
561 /*
562 * Debug related RC.
563 */
564 switch (rc)
565 {
566 /*
567 * Single step an instruction.
568 */
569 case VINF_EM_DBG_STEP:
570 if ( pVM->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
571 || pVM->em.s.enmState == EMSTATE_DEBUG_HYPER
572 || pVM->em.s.fForceRAW /* paranoia */)
573 rc = emR3RawStep(pVM);
574 else
575 {
576 Assert(pVM->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
577 rc = emR3RemStep(pVM);
578 }
579 break;
580
581 /*
582 * Simple events: stepped, breakpoint, stop/assertion.
583 */
584 case VINF_EM_DBG_STEPPED:
585 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED);
586 break;
587
588 case VINF_EM_DBG_BREAKPOINT:
589 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT);
590 break;
591
592 case VINF_EM_DBG_STOP:
593 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, NULL, 0, NULL, NULL);
594 break;
595
596 case VINF_EM_DBG_HYPER_STEPPED:
597 rc = DBGFR3Event(pVM, DBGFEVENT_STEPPED_HYPER);
598 break;
599
600 case VINF_EM_DBG_HYPER_BREAKPOINT:
601 rc = DBGFR3EventBreakpoint(pVM, DBGFEVENT_BREAKPOINT_HYPER);
602 break;
603
604 case VINF_EM_DBG_HYPER_ASSERTION:
605 RTPrintf("\nVINF_EM_DBG_HYPER_ASSERTION:\n%s%s\n", VMMR3GetGCAssertMsg1(pVM), VMMR3GetGCAssertMsg2(pVM));
606 rc = DBGFR3EventAssertion(pVM, DBGFEVENT_ASSERTION_HYPER, VMMR3GetGCAssertMsg1(pVM), VMMR3GetGCAssertMsg2(pVM));
607 break;
608
609 /*
610 * Guru meditation.
611 */
612 case VERR_REM_TOO_MANY_TRAPS: /** @todo Make a guru mediation event! */
613 rc = DBGFR3EventSrc(pVM, DBGFEVENT_DEV_STOP, "VERR_REM_TOO_MANY_TRAPS", 0, NULL, NULL);
614 break;
615
616 default: /** @todo don't use default for guru, but make special errors code! */
617 rc = DBGFR3Event(pVM, DBGFEVENT_FATAL_ERROR);
618 break;
619 }
620
621 /*
622 * Process the result.
623 */
624 do
625 {
626 switch (rc)
627 {
628 /*
629 * Continue the debugging loop.
630 */
631 case VINF_EM_DBG_STEP:
632 case VINF_EM_DBG_STOP:
633 case VINF_EM_DBG_STEPPED:
634 case VINF_EM_DBG_BREAKPOINT:
635 case VINF_EM_DBG_HYPER_STEPPED:
636 case VINF_EM_DBG_HYPER_BREAKPOINT:
637 case VINF_EM_DBG_HYPER_ASSERTION:
638 break;
639
640 /*
641 * Resuming execution (in some form) has to be done here if we got
642 * a hypervisor debug event.
643 */
644 case VINF_SUCCESS:
645 case VINF_EM_RESUME:
646 case VINF_EM_SUSPEND:
647 case VINF_EM_RESCHEDULE:
648 case VINF_EM_RESCHEDULE_RAW:
649 case VINF_EM_RESCHEDULE_REM:
650 case VINF_EM_HALT:
651 if (pVM->em.s.enmState == EMSTATE_DEBUG_HYPER)
652 {
653 rc = emR3RawResumeHyper(pVM);
654 if (rc != VINF_SUCCESS && VBOX_SUCCESS(rc))
655 continue;
656 }
657 if (rc == VINF_SUCCESS)
658 rc = VINF_EM_RESCHEDULE;
659 return rc;
660
661 /*
662 * The debugger isn't attached.
663 * We'll simply turn the thing off since that's the easiest thing to do.
664 */
665 case VERR_DBGF_NOT_ATTACHED:
666 switch (rcLast)
667 {
668 case VINF_EM_DBG_HYPER_ASSERTION:
669 case VINF_EM_DBG_HYPER_STEPPED:
670 case VINF_EM_DBG_HYPER_BREAKPOINT:
671 return rcLast;
672 }
673 return VINF_EM_OFF;
674
675 /*
676 * Status codes terminating the VM in one or another sense.
677 */
678 case VINF_EM_TERMINATE:
679 case VINF_EM_OFF:
680 case VINF_EM_RESET:
681 case VINF_EM_RAW_STALE_SELECTOR:
682 case VINF_EM_RAW_IRET_TRAP:
683 case VERR_TRPM_PANIC:
684 case VERR_TRPM_DONT_PANIC:
685 case VERR_INTERNAL_ERROR:
686 return rc;
687
688 /*
689 * The rest is unexpected, and will keep us here.
690 */
691 default:
692 AssertMsgFailed(("Unxpected rc %Vrc!\n", rc));
693 break;
694 }
695 } while (false);
696 } /* debug for ever */
697}
698
699
700/**
701 * Steps recompiled code.
702 *
703 * @returns VBox status code. The most important ones are: VINF_EM_STEP_EVENT,
704 * VINF_EM_RESCHEDULE, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
705 *
706 * @param pVM VM handle.
707 */
708static int emR3RemStep(PVM pVM)
709{
710 LogFlow(("emR3RemStep: cs:eip=%04x:%08x\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
711
712 /*
713 * Switch to REM, step instruction, switch back.
714 */
715 int rc = REMR3State(pVM);
716 if (VBOX_SUCCESS(rc))
717 {
718 rc = REMR3Step(pVM);
719 REMR3StateBack(pVM);
720 }
721 LogFlow(("emR3RemStep: returns %Vrc cs:eip=%04x:%08x\n", rc, CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
722 return rc;
723}
724
725/**
726 * Executes recompiled code.
727 *
728 * This function contains the recompiler version of the inner
729 * execution loop (the outer loop being in EMR3ExecuteVM()).
730 *
731 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
732 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
733 *
734 * @param pVM VM handle.
735 * @param pfFFDone Where to store an indicator telling wheter or not
736 * FFs were done before returning.
737 *
738 */
739static int emR3RemExecute(PVM pVM, bool *pfFFDone)
740{
741#ifdef LOG_ENABLED
742 PCPUMCTX pCtx = pVM->em.s.pCtx;
743 uint32_t cpl = CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx));
744
745 if (pCtx->eflags.Bits.u1VM)
746 Log(("EMV86: %04X:%08X IF=%d\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF));
747 else
748 Log(("EMR%d: %08X ESP=%08X IF=%d CR0=%x\n", cpl, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0));
749#endif
750 STAM_PROFILE_ADV_START(&pVM->em.s.StatREMTotal, a);
751
752#if defined(VBOX_STRICT) && defined(DEBUG_bird)
753 AssertMsg( VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3|VM_FF_PGM_SYNC_CR3_NON_GLOBAL)
754 || !MMHyperIsInsideArea(pVM, CPUMGetGuestEIP(pVM)), /** @todo #1419 - get flat address. */
755 ("cs:eip=%RX16:%RX32\n", CPUMGetGuestCS(pVM), CPUMGetGuestEIP(pVM)));
756#endif
757
758 /*
759 * Spin till we get a forced action which returns anything but VINF_SUCCESS
760 * or the REM suggests raw-mode execution.
761 */
762 *pfFFDone = false;
763 bool fInREMState = false;
764 int rc = VINF_SUCCESS;
765 for (;;)
766 {
767 /*
768 * Update REM state if not already in sync.
769 */
770 if (!fInREMState)
771 {
772 STAM_PROFILE_START(&pVM->em.s.StatREMSync, b);
773 rc = REMR3State(pVM);
774 STAM_PROFILE_STOP(&pVM->em.s.StatREMSync, b);
775 if (VBOX_FAILURE(rc))
776 break;
777 fInREMState = true;
778
779 /*
780 * We might have missed the raising of VMREQ, TIMER and some other
781 * imporant FFs while we were busy switching the state. So, check again.
782 */
783 if (VM_FF_ISPENDING(pVM, VM_FF_REQUEST | VM_FF_TIMER | VM_FF_PDM_QUEUES | VM_FF_DBGF | VM_FF_TERMINATE | VM_FF_RESET))
784 {
785 LogFlow(("emR3RemExecute: Skipping run, because FF is set. %#x\n", pVM->fForcedActions));
786 goto l_REMDoForcedActions;
787 }
788 }
789
790
791 /*
792 * Execute REM.
793 */
794 STAM_PROFILE_START(&pVM->em.s.StatREMExec, c);
795 rc = REMR3Run(pVM);
796 STAM_PROFILE_STOP(&pVM->em.s.StatREMExec, c);
797
798
799 /*
800 * Deal with high priority post execution FFs before doing anything else.
801 */
802 if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK))
803 rc = emR3HighPriorityPostForcedActions(pVM, rc);
804
805 /*
806 * Process the returned status code.
807 * (Try keep this short! Call functions!)
808 */
809 if (rc != VINF_SUCCESS)
810 {
811 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
812 break;
813 if (rc != VINF_REM_INTERRUPED_FF)
814 {
815 /*
816 * Anything which is not known to us means an internal error
817 * and the termination of the VM!
818 */
819 AssertMsg(rc == VERR_REM_TOO_MANY_TRAPS, ("Unknown GC return code: %Vra\n", rc));
820 break;
821 }
822 }
823
824
825 /*
826 * Check and execute forced actions.
827 * Sync back the VM state before calling any of these.
828 */
829#ifdef VBOX_HIGH_RES_TIMERS_HACK
830 TMTimerPoll(pVM);
831#endif
832 if (VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK & ~(VM_FF_CSAM_PENDING_ACTION | VM_FF_CSAM_SCAN_PAGE)))
833 {
834l_REMDoForcedActions:
835 if (fInREMState)
836 {
837 STAM_PROFILE_START(&pVM->em.s.StatREMSync, d);
838 REMR3StateBack(pVM);
839 STAM_PROFILE_STOP(&pVM->em.s.StatREMSync, d);
840 fInREMState = false;
841 }
842 STAM_PROFILE_ADV_SUSPEND(&pVM->em.s.StatREMTotal, a);
843 rc = emR3ForcedActions(pVM, rc);
844 STAM_PROFILE_ADV_RESUME(&pVM->em.s.StatREMTotal, a);
845 if ( rc != VINF_SUCCESS
846 && rc != VINF_EM_RESCHEDULE_REM)
847 {
848 *pfFFDone = true;
849 break;
850 }
851 }
852
853 } /* The Inner Loop, recompiled execution mode version. */
854
855
856 /*
857 * Returning. Sync back the VM state if required.
858 */
859 if (fInREMState)
860 {
861 STAM_PROFILE_START(&pVM->em.s.StatREMSync, e);
862 REMR3StateBack(pVM);
863 STAM_PROFILE_STOP(&pVM->em.s.StatREMSync, e);
864 }
865
866 STAM_PROFILE_ADV_STOP(&pVM->em.s.StatREMTotal, a);
867 return rc;
868}
869
870
871/**
872 * Resumes executing hypervisor after a debug event.
873 *
874 * This is kind of special since our current guest state is
875 * potentially out of sync.
876 *
877 * @returns VBox status code.
878 * @param pVM The VM handle.
879 */
880static int emR3RawResumeHyper(PVM pVM)
881{
882 int rc;
883 PCPUMCTX pCtx = pVM->em.s.pCtx;
884 Assert(pVM->em.s.enmState == EMSTATE_DEBUG_HYPER);
885 Log(("emR3RawResumeHyper: cs:eip=%RTsel:%RGr efl=%RGr\n", pCtx->cs, pCtx->eip, pCtx->eflags));
886
887 /*
888 * Resume execution.
889 */
890 CPUMRawEnter(pVM, NULL);
891 CPUMSetHyperEFlags(pVM, CPUMGetHyperEFlags(pVM) | X86_EFL_RF);
892 rc = VMMR3ResumeHyper(pVM);
893 Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr - returned from GC with rc=%Vrc\n", pCtx->cs, pCtx->eip, pCtx->eflags, rc));
894 rc = CPUMRawLeave(pVM, NULL, rc);
895 VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
896
897 /*
898 * Deal with the return code.
899 */
900 rc = emR3HighPriorityPostForcedActions(pVM, rc);
901 rc = emR3RawHandleRC(pVM, pCtx, rc);
902 rc = emR3RawUpdateForceFlag(pVM, pCtx, rc);
903 return rc;
904}
905
906
907/**
908 * Steps rawmode.
909 *
910 * @returns VBox status code.
911 * @param pVM The VM handle.
912 */
913static int emR3RawStep(PVM pVM)
914{
915 Assert( pVM->em.s.enmState == EMSTATE_DEBUG_HYPER
916 || pVM->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
917 || pVM->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
918 int rc;
919 PCPUMCTX pCtx = pVM->em.s.pCtx;
920 bool fGuest = pVM->em.s.enmState != EMSTATE_DEBUG_HYPER;
921#ifndef DEBUG_sandervl
922 Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr\n", fGuest ? CPUMGetGuestCS(pVM) : CPUMGetHyperCS(pVM),
923 fGuest ? CPUMGetGuestEIP(pVM) : CPUMGetHyperEIP(pVM), fGuest ? CPUMGetGuestEFlags(pVM) : CPUMGetHyperEFlags(pVM)));
924#endif
925 if (fGuest)
926 {
927 /*
928 * Check vital forced actions, but ignore pending interrupts and timers.
929 */
930 if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
931 {
932 rc = emR3RawForcedActions(pVM, pCtx);
933 if (VBOX_FAILURE(rc))
934 return rc;
935 }
936
937 /*
938 * Set flags for single stepping.
939 */
940 CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) | X86_EFL_TF | X86_EFL_RF);
941 }
942 else
943 CPUMSetHyperEFlags(pVM, CPUMGetHyperEFlags(pVM) | X86_EFL_TF | X86_EFL_RF);
944
945 /*
946 * Single step.
947 * We do not start time or anything, if anything we should just do a few nanoseconds.
948 */
949 CPUMRawEnter(pVM, NULL);
950 do
951 {
952 if (pVM->em.s.enmState == EMSTATE_DEBUG_HYPER)
953 rc = VMMR3ResumeHyper(pVM);
954 else
955 rc = VMMR3RawRunGC(pVM);
956#ifndef DEBUG_sandervl
957 Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr - GC rc %Vrc\n", fGuest ? CPUMGetGuestCS(pVM) : CPUMGetHyperCS(pVM),
958 fGuest ? CPUMGetGuestEIP(pVM) : CPUMGetHyperEIP(pVM), fGuest ? CPUMGetGuestEFlags(pVM) : CPUMGetHyperEFlags(pVM), rc));
959#endif
960 } while ( rc == VINF_SUCCESS
961 || rc == VINF_EM_RAW_INTERRUPT);
962 rc = CPUMRawLeave(pVM, NULL, rc);
963 VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
964
965 /*
966 * Make sure the trap flag is cleared.
967 * (Too bad if the guest is trying to single step too.)
968 */
969 if (fGuest)
970 CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) & ~X86_EFL_TF);
971 else
972 CPUMSetHyperEFlags(pVM, CPUMGetHyperEFlags(pVM) & ~X86_EFL_TF);
973
974 /*
975 * Deal with the return codes.
976 */
977 rc = emR3HighPriorityPostForcedActions(pVM, rc);
978 rc = emR3RawHandleRC(pVM, pCtx, rc);
979 rc = emR3RawUpdateForceFlag(pVM, pCtx, rc);
980 return rc;
981}
982
983
984#ifdef DEBUG
985
986/**
987 * Steps hardware accelerated mode.
988 *
989 * @returns VBox status code.
990 * @param pVM The VM handle.
991 */
992static int emR3HwAccStep(PVM pVM)
993{
994 Assert(pVM->em.s.enmState == EMSTATE_DEBUG_GUEST_HWACC);
995
996 int rc;
997 PCPUMCTX pCtx = pVM->em.s.pCtx;
998 VM_FF_CLEAR(pVM, (VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_TSS));
999
1000 /*
1001 * Check vital forced actions, but ignore pending interrupts and timers.
1002 */
1003 if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
1004 {
1005 rc = emR3RawForcedActions(pVM, pCtx);
1006 if (VBOX_FAILURE(rc))
1007 return rc;
1008 }
1009 /*
1010 * Set flags for single stepping.
1011 */
1012 CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) | X86_EFL_TF | X86_EFL_RF);
1013
1014 /*
1015 * Single step.
1016 * We do not start time or anything, if anything we should just do a few nanoseconds.
1017 */
1018 do
1019 {
1020 rc = VMMR3HwAccRunGC(pVM);
1021 } while ( rc == VINF_SUCCESS
1022 || rc == VINF_EM_RAW_INTERRUPT);
1023 VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
1024
1025 /*
1026 * Make sure the trap flag is cleared.
1027 * (Too bad if the guest is trying to single step too.)
1028 */
1029 CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) & ~X86_EFL_TF);
1030
1031 /*
1032 * Deal with the return codes.
1033 */
1034 rc = emR3HighPriorityPostForcedActions(pVM, rc);
1035 rc = emR3RawHandleRC(pVM, pCtx, rc);
1036 rc = emR3RawUpdateForceFlag(pVM, pCtx, rc);
1037 return rc;
1038}
1039
1040
1041void emR3SingleStepExecRaw(PVM pVM, uint32_t cIterations)
1042{
1043 EMSTATE enmOldState = pVM->em.s.enmState;
1044
1045 pVM->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
1046
1047 Log(("Single step BEGIN:\n"));
1048 for(uint32_t i=0;i<cIterations;i++)
1049 {
1050 DBGFR3PrgStep(pVM);
1051 DBGFR3DisasInstrCurrentLog(pVM, "RSS: ");
1052 emR3RawStep(pVM);
1053 }
1054 Log(("Single step END:\n"));
1055 CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) & ~X86_EFL_TF);
1056 pVM->em.s.enmState = enmOldState;
1057}
1058
1059
1060void emR3SingleStepExecHwAcc(PVM pVM, uint32_t cIterations)
1061{
1062 EMSTATE enmOldState = pVM->em.s.enmState;
1063
1064 pVM->em.s.enmState = EMSTATE_DEBUG_GUEST_HWACC;
1065
1066 Log(("Single step BEGIN:\n"));
1067 for(uint32_t i=0;i<cIterations;i++)
1068 {
1069 DBGFR3PrgStep(pVM);
1070 DBGFR3DisasInstrCurrentLog(pVM, "RSS: ");
1071 emR3HwAccStep(pVM);
1072 }
1073 Log(("Single step END:\n"));
1074 CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) & ~X86_EFL_TF);
1075 pVM->em.s.enmState = enmOldState;
1076}
1077
1078
1079void emR3SingleStepExecRem(PVM pVM, uint32_t cIterations)
1080{
1081 EMSTATE enmOldState = pVM->em.s.enmState;
1082
1083 pVM->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
1084
1085 Log(("Single step BEGIN:\n"));
1086 for(uint32_t i=0;i<cIterations;i++)
1087 {
1088 DBGFR3PrgStep(pVM);
1089 DBGFR3DisasInstrCurrentLog(pVM, "RSS: ");
1090 emR3RemStep(pVM);
1091 }
1092 Log(("Single step END:\n"));
1093 CPUMSetGuestEFlags(pVM, CPUMGetGuestEFlags(pVM) & ~X86_EFL_TF);
1094 pVM->em.s.enmState = enmOldState;
1095}
1096
1097#endif /* DEBUG */
1098
1099
1100/**
1101 * Executes one (or perhaps a few more) instruction(s).
1102 *
1103 * @returns VBox status code suitable for EM.
1104 *
1105 * @param pVM VM handle.
1106 * @param rcGC GC return code
1107 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
1108 * instruction and prefix the log output with this text.
1109 */
1110#ifdef LOG_ENABLED
1111static int emR3RawExecuteInstructionWorker(PVM pVM, int rcGC, const char *pszPrefix)
1112#else
1113static int emR3RawExecuteInstructionWorker(PVM pVM, int rcGC)
1114#endif
1115{
1116 PCPUMCTX pCtx = pVM->em.s.pCtx;
1117 int rc;
1118
1119 /*
1120 *
1121 * The simple solution is to use the recompiler.
1122 * The better solution is to disassemble the current instruction and
1123 * try handle as many as possible without using REM.
1124 *
1125 */
1126
1127#ifdef LOG_ENABLED
1128 /*
1129 * Disassemble the instruction if requested.
1130 */
1131 if (pszPrefix)
1132 {
1133 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
1134 DBGFR3DisasInstrCurrentLog(pVM, pszPrefix);
1135 }
1136#endif /* LOG_ENABLED */
1137
1138 /*
1139 * PATM is making life more interesting.
1140 * We cannot hand anything to REM which has an EIP inside patch code. So, we'll
1141 * tell PATM there is a trap in this code and have it take the appropriate actions
1142 * to allow us execute the code in REM.
1143 */
1144 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
1145 {
1146 Log(("emR3RawExecuteInstruction: In patch block. eip=%VGv\n", pCtx->eip));
1147
1148 RTGCPTR pNewEip;
1149 rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
1150 switch (rc)
1151 {
1152 /*
1153 * It's not very useful to emulate a single instruction and then go back to raw
1154 * mode; just execute the whole block until IF is set again.
1155 */
1156 case VINF_SUCCESS:
1157 Log(("emR3RawExecuteInstruction: Executing instruction starting at new address %VGv IF=%d VMIF=%x\n",
1158 pNewEip, pCtx->eflags.Bits.u1IF, pVM->em.s.pPatmGCState->uVMFlags));
1159 pCtx->eip = pNewEip;
1160 Assert(pCtx->eip);
1161
1162 if (pCtx->eflags.Bits.u1IF)
1163 {
1164 /*
1165 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1166 */
1167 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1168 return emR3RawExecuteInstruction(pVM, "PATCHIR");
1169 }
1170 else if (rcGC == VINF_PATM_PENDING_IRQ_AFTER_IRET)
1171 {
1172 /* special case: iret, that sets IF, detected a pending irq/event */
1173 return emR3RawExecuteInstruction(pVM, "PATCHIRET");
1174 }
1175 return VINF_EM_RESCHEDULE_REM;
1176
1177 /*
1178 * One instruction.
1179 */
1180 case VINF_PATCH_EMULATE_INSTR:
1181 Log(("emR3RawExecuteInstruction: Emulate patched instruction at %VGv IF=%d VMIF=%x\n",
1182 pNewEip, pCtx->eflags.Bits.u1IF, pVM->em.s.pPatmGCState->uVMFlags));
1183 pCtx->eip = pNewEip;
1184 return emR3RawExecuteInstruction(pVM, "PATCHIR");
1185
1186 /*
1187 * The patch was disabled, hand it to the REM.
1188 */
1189 case VERR_PATCH_DISABLED:
1190 Log(("emR3RawExecuteInstruction: Disabled patch -> new eip %VGv IF=%d VMIF=%x\n",
1191 pNewEip, pCtx->eflags.Bits.u1IF, pVM->em.s.pPatmGCState->uVMFlags));
1192 pCtx->eip = pNewEip;
1193 if (pCtx->eflags.Bits.u1IF)
1194 {
1195 /*
1196 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1197 */
1198 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1199 return emR3RawExecuteInstruction(pVM, "PATCHIR");
1200 }
1201 return VINF_EM_RESCHEDULE_REM;
1202
1203 /* Force continued patch exection; usually due to write monitored stack. */
1204 case VINF_PATCH_CONTINUE:
1205 return VINF_SUCCESS;
1206
1207 default:
1208 AssertReleaseMsgFailed(("Unknown return code %Vrc from PATMR3HandleTrap\n", rc));
1209 return VERR_INTERNAL_ERROR;
1210 }
1211 }
1212
1213#if 0 /// @todo Sander, this breaks the linux image (panics). So, I'm disabling it for now. (OP_MOV triggers it btw.)
1214 DISCPUSTATE Cpu;
1215 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->eip, &Cpu, "GEN EMU");
1216 if (VBOX_SUCCESS(rc))
1217 {
1218 uint32_t size;
1219
1220 switch (Cpu.pCurInstr->opcode)
1221 {
1222 case OP_MOV:
1223 case OP_AND:
1224 case OP_OR:
1225 case OP_XOR:
1226 case OP_POP:
1227 case OP_INC:
1228 case OP_DEC:
1229 case OP_XCHG:
1230 STAM_PROFILE_START(&pVM->em.s.StatMiscEmu, a);
1231 rc = EMInterpretInstructionCPU(pVM, &Cpu, CPUMCTX2CORE(pCtx), 0, &size);
1232 if (VBOX_SUCCESS(rc))
1233 {
1234 pCtx->eip += Cpu.opsize;
1235 STAM_PROFILE_STOP(&pVM->em.s.StatMiscEmu, a);
1236 return rc;
1237 }
1238 if (rc != VERR_EM_INTERPRETER)
1239 AssertMsgFailedReturn(("rc=%Vrc\n", rc), rc);
1240 STAM_PROFILE_STOP(&pVM->em.s.StatMiscEmu, a);
1241 break;
1242 }
1243 }
1244#endif
1245 STAM_PROFILE_START(&pVM->em.s.StatREMEmu, a);
1246 rc = REMR3EmulateInstruction(pVM);
1247 STAM_PROFILE_STOP(&pVM->em.s.StatREMEmu, a);
1248
1249 return rc;
1250}
1251
1252
1253/**
1254 * Executes one (or perhaps a few more) instruction(s).
1255 * This is just a wrapper for discarding pszPrefix in non-logging builds.
1256 *
1257 * @returns VBox status code suitable for EM.
1258 * @param pVM VM handle.
1259 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
1260 * instruction and prefix the log output with this text.
1261 * @param rcGC GC return code
1262 */
1263DECLINLINE(int) emR3RawExecuteInstruction(PVM pVM, const char *pszPrefix, int rcGC)
1264{
1265#ifdef LOG_ENABLED
1266 return emR3RawExecuteInstructionWorker(pVM, rcGC, pszPrefix);
1267#else
1268 return emR3RawExecuteInstructionWorker(pVM, rcGC);
1269#endif
1270}
1271
1272/**
1273 * Executes one (or perhaps a few more) IO instruction(s).
1274 *
1275 * @returns VBox status code suitable for EM.
1276 * @param pVM VM handle.
1277 */
1278int emR3RawExecuteIOInstruction(PVM pVM)
1279{
1280 int rc;
1281 PCPUMCTX pCtx = pVM->em.s.pCtx;
1282
1283 STAM_PROFILE_START(&pVM->em.s.StatIOEmu, a);
1284
1285 /** @todo probably we should fall back to the recompiler; otherwise we'll go back and forth between HC & GC
1286 * as io instructions tend to come in packages of more than one
1287 */
1288 DISCPUSTATE Cpu;
1289 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->eip, &Cpu, "IO EMU");
1290 if (VBOX_SUCCESS(rc))
1291 {
1292 rc = VINF_EM_RAW_EMULATE_INSTR;
1293
1294 if (!(Cpu.prefix & (PREFIX_REP | PREFIX_REPNE)))
1295 {
1296 switch (Cpu.pCurInstr->opcode)
1297 {
1298 case OP_IN:
1299 {
1300 STAM_COUNTER_INC(&pVM->em.s.CTXSUFF(pStats)->StatIn);
1301 rc = IOMInterpretIN(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1302 break;
1303 }
1304
1305 case OP_OUT:
1306 {
1307 STAM_COUNTER_INC(&pVM->em.s.CTXSUFF(pStats)->StatOut);
1308 rc = IOMInterpretOUT(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1309 break;
1310 }
1311 }
1312 }
1313 else if (Cpu.prefix & PREFIX_REP)
1314 {
1315 switch (Cpu.pCurInstr->opcode)
1316 {
1317 case OP_INSB:
1318 case OP_INSWD:
1319 {
1320 STAM_COUNTER_INC(&pVM->em.s.CTXSUFF(pStats)->StatIn);
1321 rc = IOMInterpretINS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1322 break;
1323 }
1324
1325 case OP_OUTSB:
1326 case OP_OUTSWD:
1327 {
1328 STAM_COUNTER_INC(&pVM->em.s.CTXSUFF(pStats)->StatOut);
1329 rc = IOMInterpretOUTS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
1330 break;
1331 }
1332 }
1333 }
1334
1335 /*
1336 * Handled the I/O return codes.
1337 * (The unhandled cases end up with rc == VINF_EM_RAW_EMULATE_INSTR.)
1338 */
1339 if (IOM_SUCCESS(rc))
1340 {
1341 pCtx->eip += Cpu.opsize;
1342 STAM_PROFILE_STOP(&pVM->em.s.StatIOEmu, a);
1343 return rc;
1344 }
1345
1346 if (rc == VINF_EM_RAW_GUEST_TRAP)
1347 {
1348 STAM_PROFILE_STOP(&pVM->em.s.StatIOEmu, a);
1349 rc = emR3RawGuestTrap(pVM);
1350 return rc;
1351 }
1352 AssertMsg(rc != VINF_TRPM_XCPT_DISPATCHED, ("Handle VINF_TRPM_XCPT_DISPATCHED\n"));
1353
1354 if (VBOX_FAILURE(rc))
1355 {
1356 STAM_PROFILE_STOP(&pVM->em.s.StatIOEmu, a);
1357 return rc;
1358 }
1359 AssertMsg(rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RESCHEDULE_REM, ("rc=%Vrc\n", rc));
1360 }
1361 STAM_PROFILE_STOP(&pVM->em.s.StatIOEmu, a);
1362 return emR3RawExecuteInstruction(pVM, "IO: ");
1363}
1364
1365
1366/**
1367 * Handle a guest context trap.
1368 *
1369 * @returns VBox status code suitable for EM.
1370 * @param pVM VM handle.
1371 */
1372static int emR3RawGuestTrap(PVM pVM)
1373{
1374 PCPUMCTX pCtx = pVM->em.s.pCtx;
1375
1376 /*
1377 * Get the trap info.
1378 */
1379 uint8_t u8TrapNo;
1380 TRPMEVENT enmType;;
1381 RTGCUINT uErrorCode;
1382 RTGCUINTPTR uCR2;
1383 int rc = TRPMQueryTrapAll(pVM, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
1384 if (VBOX_FAILURE(rc))
1385 {
1386 AssertReleaseMsgFailed(("No trap! (rc=%Vrc)\n", rc));
1387 return rc;
1388 }
1389
1390 /* Traps can be directly forwarded in hardware accelerated mode. */
1391 if (HWACCMR3IsActive(pVM))
1392 {
1393#ifdef LOGGING_ENABLED
1394 DBGFR3InfoLog(pVM, "cpumguest", "Guest trap");
1395 DBGFR3DisasInstrCurrentLog(pVM, "Guest trap");
1396#endif
1397 return VINF_EM_RESCHEDULE_HWACC;
1398 }
1399
1400 /** Scan kernel code that traps; we might not get another chance. */
1401 if ( (pCtx->ss & X86_SEL_RPL) <= 1
1402 && !pCtx->eflags.Bits.u1VM)
1403 {
1404 Assert(!PATMIsPatchGCAddr(pVM, pCtx->eip));
1405 CSAMR3CheckCodeEx(pVM, pCtx->cs, &pCtx->csHid, pCtx->eip);
1406 }
1407
1408 if (u8TrapNo == 6) /* (#UD) Invalid opcode. */
1409 {
1410 DISCPUSTATE cpu;
1411
1412 /* If MONITOR & MWAIT are supported, then interpret them here. */
1413 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->eip, &cpu, "Guest Trap (#UD): ");
1414 if ( VBOX_SUCCESS(rc)
1415 && (cpu.pCurInstr->opcode == OP_MONITOR || cpu.pCurInstr->opcode == OP_MWAIT))
1416 {
1417 uint32_t u32Dummy, u32Features, u32ExtFeatures, size;
1418
1419 CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Features);
1420
1421 if (u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR)
1422 {
1423 rc = TRPMResetTrap(pVM);
1424 AssertRC(rc);
1425
1426 rc = EMInterpretInstructionCPU(pVM, &cpu, CPUMCTX2CORE(pCtx), 0, &size);
1427 if (VBOX_SUCCESS(rc))
1428 {
1429 pCtx->eip += cpu.opsize;
1430 return rc;
1431 }
1432 return emR3RawExecuteInstruction(pVM, "Monitor: ");
1433 }
1434 }
1435 }
1436 else if (u8TrapNo == 13) /* (#GP) Privileged exception */
1437 {
1438 DISCPUSTATE cpu;
1439
1440 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->eip, &cpu, "Guest Trap: ");
1441 if (VBOX_SUCCESS(rc) && (cpu.pCurInstr->optype & OPTYPE_PORTIO))
1442 {
1443 /*
1444 * We should really check the TSS for the IO bitmap, but it's not like this
1445 * lazy approach really makes things worse.
1446 */
1447 rc = TRPMResetTrap(pVM);
1448 AssertRC(rc);
1449 return emR3RawExecuteInstruction(pVM, "IO Guest Trap: ");
1450 }
1451 }
1452
1453#ifdef LOG_ENABLED
1454 DBGFR3InfoLog(pVM, "cpumguest", "Guest trap");
1455 DBGFR3DisasInstrCurrentLog(pVM, "Guest trap");
1456
1457 /* Get guest page information. */
1458 uint64_t fFlags = 0;
1459 RTGCPHYS GCPhys = 0;
1460 int rc2 = PGMGstGetPage(pVM, uCR2, &fFlags, &GCPhys);
1461 Log(("emR3RawGuestTrap: cs:eip=%04x:%08x: trap=%02x err=%08x cr2=%08x cr0=%08x%s: Phys=%VGp fFlags=%08llx %s %s %s%s rc2=%d\n",
1462 pCtx->cs, pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0, (enmType == TRPM_SOFTWARE_INT) ? " software" : "", GCPhys, fFlags,
1463 fFlags & X86_PTE_P ? "P " : "NP", fFlags & X86_PTE_US ? "U" : "S",
1464 fFlags & X86_PTE_RW ? "RW" : "R0", fFlags & X86_PTE_G ? " G" : "", rc2));
1465#endif
1466
1467 /*
1468 * #PG has CR2.
1469 * (Because of stuff like above we must set CR2 in a delayed fashion.)
1470 */
1471 if (u8TrapNo == 14 /* #PG */)
1472 pCtx->cr2 = uCR2;
1473
1474 return VINF_EM_RESCHEDULE_REM;
1475}
1476
1477
1478/**
1479 * Handle a ring switch trap.
1480 * Need to do statistics and to install patches. The result is going to REM.
1481 *
1482 * @returns VBox status code suitable for EM.
1483 * @param pVM VM handle.
1484 */
1485int emR3RawRingSwitch(PVM pVM)
1486{
1487 int rc;
1488 DISCPUSTATE Cpu;
1489 PCPUMCTX pCtx = pVM->em.s.pCtx;
1490
1491 /*
1492 * sysenter, syscall & callgate
1493 */
1494 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->eip, &Cpu, "RSWITCH: ");
1495 if (VBOX_SUCCESS(rc))
1496 {
1497 if (Cpu.pCurInstr->opcode == OP_SYSENTER)
1498 {
1499 if (pCtx->SysEnter.cs != 0)
1500 {
1501 rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid, pCtx->eip),
1502 SELMIsSelector32Bit(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) ? PATMFL_CODE32 : 0);
1503 if (VBOX_SUCCESS(rc))
1504 {
1505 DBGFR3DisasInstrCurrentLog(pVM, "Patched sysenter instruction");
1506 return VINF_EM_RESCHEDULE_RAW;
1507 }
1508 }
1509 }
1510
1511#ifdef VBOX_WITH_STATISTICS
1512 switch (Cpu.pCurInstr->opcode)
1513 {
1514 case OP_SYSENTER:
1515 STAM_COUNTER_INC(&pVM->em.s.CTXSUFF(pStats)->StatSysEnter);
1516 break;
1517 case OP_SYSEXIT:
1518 STAM_COUNTER_INC(&pVM->em.s.CTXSUFF(pStats)->StatSysExit);
1519 break;
1520 case OP_SYSCALL:
1521 STAM_COUNTER_INC(&pVM->em.s.CTXSUFF(pStats)->StatSysCall);
1522 break;
1523 case OP_SYSRET:
1524 STAM_COUNTER_INC(&pVM->em.s.CTXSUFF(pStats)->StatSysRet);
1525 break;
1526 }
1527#endif
1528 }
1529 else
1530 AssertRC(rc);
1531
1532 /* go to the REM to emulate a single instruction */
1533 return emR3RawExecuteInstruction(pVM, "RSWITCH: ");
1534}
1535
1536/**
1537 * Handle a trap (#PF or #GP) in patch code
1538 *
1539 * @returns VBox status code suitable for EM.
1540 * @param pVM VM handle.
1541 * @param pCtx CPU context
1542 * @param gcret GC return code
1543 */
1544int emR3PatchTrap(PVM pVM, PCPUMCTX pCtx, int gcret)
1545{
1546 uint8_t u8TrapNo;
1547 int rc;
1548 TRPMEVENT enmType;
1549 RTGCUINT uErrorCode;
1550 RTGCUINTPTR uCR2;
1551
1552 Assert(PATMIsPatchGCAddr(pVM, pCtx->eip));
1553
1554 if (gcret == VINF_PATM_PATCH_INT3)
1555 {
1556 u8TrapNo = 3;
1557 uCR2 = 0;
1558 uErrorCode = 0;
1559 }
1560 else
1561 if (gcret == VINF_PATM_PATCH_TRAP_GP)
1562 {
1563 /* No active trap in this case. Kind of ugly. */
1564 u8TrapNo = X86_XCPT_GP;
1565 uCR2 = 0;
1566 uErrorCode = 0;
1567 }
1568 else
1569 {
1570 rc = TRPMQueryTrapAll(pVM, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
1571 if (VBOX_FAILURE(rc))
1572 {
1573 AssertReleaseMsgFailed(("emR3PatchTrap: no trap! (rc=%Vrc) gcret=%Vrc\n", rc, gcret));
1574 return rc;
1575 }
1576 /* Reset the trap as we'll execute the original instruction again. */
1577 TRPMResetTrap(pVM);
1578 }
1579
1580 /*
1581 * Deal with traps inside patch code.
1582 * (This code won't run outside GC.)
1583 */
1584 if (u8TrapNo != 1)
1585 {
1586#ifdef LOG_ENABLED
1587 DBGFR3InfoLog(pVM, "cpumguest", "Trap in patch code");
1588 DBGFR3DisasInstrCurrentLog(pVM, "Patch code");
1589
1590 DISCPUSTATE Cpu;
1591 int rc;
1592
1593 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->eip, &Cpu, "Patch code: ");
1594 if ( VBOX_SUCCESS(rc)
1595 && Cpu.pCurInstr->opcode == OP_IRET)
1596 {
1597 uint32_t eip, selCS, uEFlags;
1598
1599 /* Iret crashes are bad as we have already changed the flags on the stack */
1600 rc = PGMPhysReadGCPtr(pVM, &eip, pCtx->esp, 4);
1601 rc |= PGMPhysReadGCPtr(pVM, &selCS, pCtx->esp+4, 4);
1602 rc |= PGMPhysReadGCPtr(pVM, &uEFlags, pCtx->esp+8, 4);
1603 if (rc == VINF_SUCCESS)
1604 {
1605 if ( (uEFlags & X86_EFL_VM)
1606 || (selCS & X86_SEL_RPL) == 3)
1607 {
1608 uint32_t selSS, esp;
1609
1610 rc |= PGMPhysReadGCPtr(pVM, &esp, pCtx->esp + 12, 4);
1611 rc |= PGMPhysReadGCPtr(pVM, &selSS, pCtx->esp + 16, 4);
1612
1613 if (uEFlags & X86_EFL_VM)
1614 {
1615 uint32_t selDS, selES, selFS, selGS;
1616 rc = PGMPhysReadGCPtr(pVM, &selES, pCtx->esp + 20, 4);
1617 rc |= PGMPhysReadGCPtr(pVM, &selDS, pCtx->esp + 24, 4);
1618 rc |= PGMPhysReadGCPtr(pVM, &selFS, pCtx->esp + 28, 4);
1619 rc |= PGMPhysReadGCPtr(pVM, &selGS, pCtx->esp + 32, 4);
1620 if (rc == VINF_SUCCESS)
1621 {
1622 Log(("Patch code: IRET->VM stack frame: return address %04X:%VGv eflags=%08x ss:esp=%04X:%VGv\n", selCS, eip, uEFlags, selSS, esp));
1623 Log(("Patch code: IRET->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
1624 }
1625 }
1626 else
1627 Log(("Patch code: IRET stack frame: return address %04X:%VGv eflags=%08x ss:esp=%04X:%VGv\n", selCS, eip, uEFlags, selSS, esp));
1628 }
1629 else
1630 Log(("Patch code: IRET stack frame: return address %04X:%VGv eflags=%08x\n", selCS, eip, uEFlags));
1631 }
1632 }
1633#endif
1634 Log(("emR3PatchTrap: in patch: eip=%08x: trap=%02x err=%08x cr2=%08x cr0=%08x\n",
1635 pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0));
1636
1637 RTGCPTR pNewEip;
1638 rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
1639 switch (rc)
1640 {
1641 /*
1642 * Execute the faulting instruction.
1643 */
1644 case VINF_SUCCESS:
1645 {
1646 /** @todo execute a whole block */
1647 Log(("emR3PatchTrap: Executing faulting instruction at new address %VGv\n", pNewEip));
1648 if (!(pVM->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
1649 Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));
1650
1651 pCtx->eip = pNewEip;
1652 AssertRelease(pCtx->eip);
1653
1654 if (pCtx->eflags.Bits.u1IF)
1655 {
1656 /* Windows XP lets irets fault intentionally and then takes action based on the opcode; an
1657 * int3 patch overwrites it and leads to blue screens. Remove the patch in this case.
1658 */
1659 if ( u8TrapNo == X86_XCPT_GP
1660 && PATMIsInt3Patch(pVM, pCtx->eip, NULL, NULL))
1661 {
1662 /** @todo move to PATMR3HandleTrap */
1663 Log(("Possible Windows XP iret fault at %VGv\n", pCtx->eip));
1664 PATMR3RemovePatch(pVM, pCtx->eip);
1665 }
1666
1667 /** @todo Knoppix 5 regression when returning VINF_SUCCESS here and going back to raw mode. */
1668 /** @note possibly because a reschedule is required (e.g. iret to V86 code) */
1669
1670 return emR3RawExecuteInstruction(pVM, "PATCHIR");
1671 /* Interrupts are enabled; just go back to the original instruction.
1672 return VINF_SUCCESS; */
1673 }
1674 return VINF_EM_RESCHEDULE_REM;
1675 }
1676
1677 /*
1678 * One instruction.
1679 */
1680 case VINF_PATCH_EMULATE_INSTR:
1681 Log(("emR3PatchTrap: Emulate patched instruction at %VGv IF=%d VMIF=%x\n",
1682 pNewEip, pCtx->eflags.Bits.u1IF, pVM->em.s.pPatmGCState->uVMFlags));
1683 pCtx->eip = pNewEip;
1684 AssertRelease(pCtx->eip);
1685 return emR3RawExecuteInstruction(pVM, "PATCHEMUL: ");
1686
1687 /*
1688 * The patch was disabled, hand it to the REM.
1689 */
1690 case VERR_PATCH_DISABLED:
1691 if (!(pVM->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
1692 Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));
1693 pCtx->eip = pNewEip;
1694 AssertRelease(pCtx->eip);
1695
1696 if (pCtx->eflags.Bits.u1IF)
1697 {
1698 /*
1699 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
1700 */
1701 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
1702 return emR3RawExecuteInstruction(pVM, "PATCHIR");
1703 }
1704 return VINF_EM_RESCHEDULE_REM;
1705
1706 /* Force continued patch exection; usually due to write monitored stack. */
1707 case VINF_PATCH_CONTINUE:
1708 return VINF_SUCCESS;
1709
1710 /*
1711 * Anything else is *fatal*.
1712 */
1713 default:
1714 AssertReleaseMsgFailed(("Unknown return code %Vrc from PATMR3HandleTrap!\n", rc));
1715 return VERR_INTERNAL_ERROR;
1716 }
1717 }
1718 return VINF_SUCCESS;
1719}
1720
1721
1722/**
1723 * Handle a privileged instruction.
1724 *
1725 * @returns VBox status code suitable for EM.
1726 * @param pVM VM handle.
1727 */
1728int emR3RawPrivileged(PVM pVM)
1729{
1730 STAM_PROFILE_START(&pVM->em.s.StatPrivEmu, a);
1731 PCPUMCTX pCtx = pVM->em.s.pCtx;
1732
1733 Assert(!pCtx->eflags.Bits.u1VM);
1734
1735 if (PATMIsEnabled(pVM))
1736 {
1737 /*
1738 * Check if in patch code.
1739 */
1740 if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
1741 {
1742#ifdef LOG_ENABLED
1743 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
1744#endif
1745 AssertMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", pCtx->eip));
1746 return VERR_EM_RAW_PATCH_CONFLICT;
1747 }
1748 if ( (pCtx->ss & X86_SEL_RPL) == 0
1749 && !pCtx->eflags.Bits.u1VM
1750 && !PATMIsPatchGCAddr(pVM, pCtx->eip))
1751 {
1752 int rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid, pCtx->eip),
1753 SELMIsSelector32Bit(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) ? PATMFL_CODE32 : 0);
1754 if (VBOX_SUCCESS(rc))
1755 {
1756#ifdef LOG_ENABLED
1757 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
1758#endif
1759 DBGFR3DisasInstrCurrentLog(pVM, "Patched privileged instruction");
1760 return VINF_SUCCESS;
1761 }
1762 }
1763 }
1764
1765#ifdef LOG_ENABLED
1766 if (!PATMIsPatchGCAddr(pVM, pCtx->eip))
1767 {
1768 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
1769 DBGFR3DisasInstrCurrentLog(pVM, "Privileged instr: ");
1770 }
1771#endif
1772
1773 /*
1774 * Instruction statistics and logging.
1775 */
1776 DISCPUSTATE Cpu;
1777 int rc;
1778
1779 rc = CPUMR3DisasmInstrCPU(pVM, pCtx, pCtx->eip, &Cpu, "PRIV: ");
1780 if (VBOX_SUCCESS(rc))
1781 {
1782#ifdef VBOX_WITH_STATISTICS
1783 PEMSTATS pStats = pVM->em.s.CTXSUFF(pStats);
1784 switch (Cpu.pCurInstr->opcode)
1785 {
1786 case OP_INVLPG:
1787 STAM_COUNTER_INC(&pStats->StatInvlpg);
1788 break;
1789 case OP_IRET:
1790 STAM_COUNTER_INC(&pStats->StatIret);
1791 break;
1792 case OP_CLI:
1793 STAM_COUNTER_INC(&pStats->StatCli);
1794 emR3RecordCli(pVM, pCtx->eip);
1795 break;
1796 case OP_STI:
1797 STAM_COUNTER_INC(&pStats->StatSti);
1798 break;
1799 case OP_INSB:
1800 case OP_INSWD:
1801 case OP_IN:
1802 case OP_OUTSB:
1803 case OP_OUTSWD:
1804 case OP_OUT:
1805 AssertMsgFailed(("Unexpected privileged exception due to port IO\n"));
1806 break;
1807
1808 case OP_MOV_CR:
1809 if (Cpu.param1.flags & USE_REG_GEN32)
1810 {
1811 //read
1812 Assert(Cpu.param2.flags & USE_REG_CR);
1813 Assert(Cpu.param2.base.reg_ctrl <= USE_REG_CR4);
1814 STAM_COUNTER_INC(&pStats->StatMovReadCR[Cpu.param2.base.reg_ctrl]);
1815 }
1816 else
1817 {
1818 //write
1819 Assert(Cpu.param1.flags & USE_REG_CR);
1820 Assert(Cpu.param1.base.reg_ctrl <= USE_REG_CR4);
1821 STAM_COUNTER_INC(&pStats->StatMovWriteCR[Cpu.param1.base.reg_ctrl]);
1822 }
1823 break;
1824
1825 case OP_MOV_DR:
1826 STAM_COUNTER_INC(&pStats->StatMovDRx);
1827 break;
1828 case OP_LLDT:
1829 STAM_COUNTER_INC(&pStats->StatMovLldt);
1830 break;
1831 case OP_LIDT:
1832 STAM_COUNTER_INC(&pStats->StatMovLidt);
1833 break;
1834 case OP_LGDT:
1835 STAM_COUNTER_INC(&pStats->StatMovLgdt);
1836 break;
1837 case OP_SYSENTER:
1838 STAM_COUNTER_INC(&pStats->StatSysEnter);
1839 break;
1840 case OP_SYSEXIT:
1841 STAM_COUNTER_INC(&pStats->StatSysExit);
1842 break;
1843 case OP_SYSCALL:
1844 STAM_COUNTER_INC(&pStats->StatSysCall);
1845 break;
1846 case OP_SYSRET:
1847 STAM_COUNTER_INC(&pStats->StatSysRet);
1848 break;
1849 case OP_HLT:
1850 STAM_COUNTER_INC(&pStats->StatHlt);
1851 break;
1852 default:
1853 STAM_COUNTER_INC(&pStats->StatMisc);
1854 Log4(("emR3RawPrivileged: opcode=%d\n", Cpu.pCurInstr->opcode));
1855 break;
1856 }
1857#endif
1858 if ( (pCtx->ss & X86_SEL_RPL) == 0
1859 && !pCtx->eflags.Bits.u1VM
1860 && SELMIsSelector32Bit(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid))
1861 {
1862 uint32_t size;
1863
1864 STAM_PROFILE_START(&pVM->em.s.StatPrivEmu, a);
1865 switch (Cpu.pCurInstr->opcode)
1866 {
1867 case OP_CLI:
1868 pCtx->eflags.u32 &= ~X86_EFL_IF;
1869 Assert(Cpu.opsize == 1);
1870 pCtx->eip += Cpu.opsize;
1871 STAM_PROFILE_STOP(&pVM->em.s.StatPrivEmu, a);
1872 return VINF_EM_RESCHEDULE_REM; /* must go to the recompiler now! */
1873
1874 case OP_STI:
1875 pCtx->eflags.u32 |= X86_EFL_IF;
1876 EMSetInhibitInterruptsPC(pVM, pCtx->eip + Cpu.opsize);
1877 Assert(Cpu.opsize == 1);
1878 pCtx->eip += Cpu.opsize;
1879 STAM_PROFILE_STOP(&pVM->em.s.StatPrivEmu, a);
1880 return VINF_SUCCESS;
1881
1882 case OP_HLT:
1883 if (PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip))
1884 {
1885 PATMTRANSSTATE enmState;
1886 RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->eip, &enmState);
1887
1888 if (enmState == PATMTRANS_OVERWRITTEN)
1889 {
1890 rc = PATMR3DetectConflict(pVM, pOrgInstrGC, pOrgInstrGC);
1891 Assert(rc == VERR_PATCH_DISABLED);
1892 /* Conflict detected, patch disabled */
1893 Log(("emR3RawPrivileged: detected conflict -> disabled patch at %VGv\n", pCtx->eip));
1894
1895 enmState = PATMTRANS_SAFE;
1896 }
1897
1898 /* The translation had better be successful. Otherwise we can't recover. */
1899 AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %VGv\n", pCtx->eip));
1900 if (enmState != PATMTRANS_OVERWRITTEN)
1901 pCtx->eip = pOrgInstrGC;
1902 }
1903 /* no break; we could just return VINF_EM_HALT here */
1904
1905 case OP_MOV_CR:
1906 case OP_MOV_DR:
1907#ifdef LOG_ENABLED
1908 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
1909 {
1910 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
1911 DBGFR3DisasInstrCurrentLog(pVM, "Privileged instr: ");
1912 }
1913#endif
1914
1915 rc = EMInterpretInstructionCPU(pVM, &Cpu, CPUMCTX2CORE(pCtx), 0, &size);
1916 if (VBOX_SUCCESS(rc))
1917 {
1918 pCtx->eip += Cpu.opsize;
1919 STAM_PROFILE_STOP(&pVM->em.s.StatPrivEmu, a);
1920
1921 if ( Cpu.pCurInstr->opcode == OP_MOV_CR
1922 && Cpu.param1.flags == USE_REG_CR /* write */
1923 )
1924 {
1925 /* Reschedule is necessary as the execution/paging mode might have changed. */
1926 return VINF_EM_RESCHEDULE;
1927 }
1928 return rc; /* can return VINF_EM_HALT as well. */
1929 }
1930 AssertMsgReturn(rc == VERR_EM_INTERPRETER, ("%Vrc\n", rc), rc);
1931 break; /* fall back to the recompiler */
1932 }
1933 STAM_PROFILE_STOP(&pVM->em.s.StatPrivEmu, a);
1934 }
1935 }
1936
1937 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
1938 return emR3PatchTrap(pVM, pCtx, VINF_PATM_PATCH_TRAP_GP);
1939
1940 return emR3RawExecuteInstruction(pVM, "PRIV");
1941}
1942
1943
1944/**
1945 * Update the forced rawmode execution modifier.
1946 *
1947 * This function is called when we're returning from the raw-mode loop(s). If we're
1948 * in patch code, it will set a flag forcing execution to be resumed in raw-mode,
1949 * if not in patch code, the flag will be cleared.
1950 *
1951 * We should never interrupt patch code while it's being executed. Cli patches can
1952 * contain big code blocks, but they are always executed with IF=0. Other patches
1953 * replace single instructions and should be atomic.
1954 *
1955 * @returns Updated rc.
1956 *
1957 * @param pVM The VM handle.
1958 * @param pCtx The guest CPU context.
1959 * @param rc The result code.
1960 */
1961DECLINLINE(int) emR3RawUpdateForceFlag(PVM pVM, PCPUMCTX pCtx, int rc)
1962{
1963 if (PATMIsPatchGCAddr(pVM, pCtx->eip)) /** @todo check cs selector base/type */
1964 {
1965 /* ignore reschedule attempts. */
1966 switch (rc)
1967 {
1968 case VINF_EM_RESCHEDULE:
1969 case VINF_EM_RESCHEDULE_REM:
1970 rc = VINF_SUCCESS;
1971 break;
1972 }
1973 pVM->em.s.fForceRAW = true;
1974 }
1975 else
1976 pVM->em.s.fForceRAW = false;
1977 return rc;
1978}
1979
1980
1981/**
1982 * Process a subset of the raw-mode return code.
1983 *
1984 * Since we have to share this with raw-mode single stepping, this inline
1985 * function has been created to avoid code duplication.
1986 *
1987 * @returns VINF_SUCCESS if it's ok to continue raw mode.
1988 * @returns VBox status code to return to the EM main loop.
1989 *
1990 * @param pVM The VM handle
1991 * @param rc The return code.
1992 * @param pCtx The guest cpu context.
1993 */
1994DECLINLINE(int) emR3RawHandleRC(PVM pVM, PCPUMCTX pCtx, int rc)
1995{
1996 switch (rc)
1997 {
1998 /*
1999 * Common & simple ones.
2000 */
2001 case VINF_SUCCESS:
2002 break;
2003 case VINF_EM_RESCHEDULE_RAW:
2004 case VINF_EM_RESCHEDULE_HWACC:
2005 case VINF_EM_RAW_INTERRUPT:
2006 case VINF_EM_RAW_TO_R3:
2007 case VINF_EM_RAW_TIMER_PENDING:
2008 case VINF_EM_PENDING_REQUEST:
2009 rc = VINF_SUCCESS;
2010 break;
2011
2012 /*
2013 * Privileged instruction.
2014 */
2015 case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
2016 case VINF_PATM_PATCH_TRAP_GP:
2017 rc = emR3RawPrivileged(pVM);
2018 break;
2019
2020 /*
2021 * Got a trap which needs dispatching.
2022 */
2023 case VINF_EM_RAW_GUEST_TRAP:
2024 if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
2025 {
2026 AssertReleaseMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", CPUMGetGuestEIP(pVM)));
2027 rc = VERR_EM_RAW_PATCH_CONFLICT;
2028 break;
2029 }
2030
2031 Assert(TRPMHasTrap(pVM));
2032 Assert(!PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip));
2033
2034 if (TRPMHasTrap(pVM))
2035 {
2036 uint8_t u8Interrupt;
2037 uint32_t uErrorCode;
2038 TRPMERRORCODE enmError = TRPM_TRAP_NO_ERRORCODE;
2039
2040 rc = TRPMQueryTrapAll(pVM, &u8Interrupt, NULL, &uErrorCode, NULL);
2041 AssertRC(rc);
2042
2043 if (uErrorCode != ~0U)
2044 enmError = TRPM_TRAP_HAS_ERRORCODE;
2045
2046 /* If the guest gate is marked unpatched, then we will check again if we can patch it. */
2047 if (TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) == TRPM_INVALID_HANDLER)
2048 {
2049 CSAMR3CheckGates(pVM, u8Interrupt, 1);
2050 Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8Interrupt, TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) != TRPM_INVALID_HANDLER));
2051
2052 /** If it was successful, then we could go back to raw mode. */
2053 if (TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) != TRPM_INVALID_HANDLER)
2054 {
2055 /* Must check pending forced actions as our IDT or GDT might be out of sync */
2056 EMR3CheckRawForcedActions(pVM);
2057
2058 rc = TRPMForwardTrap(pVM, CPUMCTX2CORE(pCtx), u8Interrupt, uErrorCode, enmError, TRPM_TRAP, -1);
2059 if (rc == VINF_SUCCESS /* Don't use VBOX_SUCCESS */)
2060 {
2061 TRPMResetTrap(pVM);
2062 return VINF_EM_RESCHEDULE_RAW;
2063 }
2064 }
2065 }
2066 }
2067 rc = emR3RawGuestTrap(pVM);
2068 break;
2069
2070 /*
2071 * Trap in patch code.
2072 */
2073 case VINF_PATM_PATCH_TRAP_PF:
2074 case VINF_PATM_PATCH_INT3:
2075 rc = emR3PatchTrap(pVM, pCtx, rc);
2076 break;
2077
2078 case VINF_PATM_DUPLICATE_FUNCTION:
2079 Assert(PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip));
2080 rc = PATMR3DuplicateFunctionRequest(pVM, pCtx);
2081 AssertRC(rc);
2082 rc = VINF_SUCCESS;
2083 break;
2084
2085 case VINF_PATM_CHECK_PATCH_PAGE:
2086 rc = PATMR3HandleMonitoredPage(pVM);
2087 AssertRC(rc);
2088 rc = VINF_SUCCESS;
2089 break;
2090
2091 /*
2092 * Patch manager.
2093 */
2094 case VERR_EM_RAW_PATCH_CONFLICT:
2095 AssertReleaseMsgFailed(("%Vrc handling is not yet implemented\n", rc));
2096 break;
2097
2098 /*
2099 * Memory mapped I/O access - attempt to patch the instruction
2100 */
2101 case VINF_PATM_HC_MMIO_PATCH_READ:
2102 rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid, pCtx->eip),
2103 PATMFL_MMIO_ACCESS | (SELMIsSelector32Bit(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) ? PATMFL_CODE32 : 0));
2104 if (VBOX_FAILURE(rc))
2105 rc = emR3RawExecuteInstruction(pVM, "MMIO");
2106 break;
2107
2108 case VINF_PATM_HC_MMIO_PATCH_WRITE:
2109 AssertFailed(); /* not yet implemented. */
2110 rc = emR3RawExecuteInstruction(pVM, "MMIO");
2111 break;
2112
2113 /*
2114 * Conflict or out of page tables.
2115 *
2116 * VM_FF_PGM_SYNC_CR3 is set by the hypervisor and all we need to
2117 * do here is to execute the pending forced actions.
2118 */
2119 case VINF_PGM_SYNC_CR3:
2120 AssertMsg(VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL),
2121 ("VINF_PGM_SYNC_CR3 and no VM_FF_PGM_SYNC_CR3*!\n"));
2122 rc = VINF_SUCCESS;
2123 break;
2124
2125 /*
2126 * Paging mode change.
2127 */
2128 case VINF_PGM_CHANGE_MODE:
2129 rc = PGMChangeMode(pVM, pCtx->cr0, pCtx->cr4, pCtx->msrEFER);
2130 if (VBOX_SUCCESS(rc))
2131 rc = VINF_EM_RESCHEDULE;
2132 break;
2133
2134 /*
2135 * CSAM wants to perform a task in ring-3. It has set an FF action flag.
2136 */
2137 case VINF_CSAM_PENDING_ACTION:
2138 rc = VINF_SUCCESS;
2139 break;
2140
2141 /*
2142 * Invoked Interrupt gate - must directly (!) go to the recompiler.
2143 */
2144 case VINF_EM_RAW_INTERRUPT_PENDING:
2145 case VINF_EM_RAW_RING_SWITCH_INT:
2146 {
2147 uint8_t u8Interrupt;
2148
2149 Assert(TRPMHasTrap(pVM));
2150 Assert(!PATMIsPatchGCAddr(pVM, (RTGCPTR)pCtx->eip));
2151
2152 if (TRPMHasTrap(pVM))
2153 {
2154 u8Interrupt = TRPMGetTrapNo(pVM);
2155
2156 /* If the guest gate is marked unpatched, then we will check again if we can patch it. */
2157 if (TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) == TRPM_INVALID_HANDLER)
2158 {
2159 CSAMR3CheckGates(pVM, u8Interrupt, 1);
2160 Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8Interrupt, TRPMR3GetGuestTrapHandler(pVM, u8Interrupt) != TRPM_INVALID_HANDLER));
2161 /** @note If it was successful, then we could go back to raw mode, but let's keep things simple for now. */
2162 }
2163 }
2164 rc = VINF_EM_RESCHEDULE_REM;
2165 break;
2166 }
2167
2168 /*
2169 * Other ring switch types.
2170 */
2171 case VINF_EM_RAW_RING_SWITCH:
2172 rc = emR3RawRingSwitch(pVM);
2173 break;
2174
2175 /*
2176 * REMGCNotifyInvalidatePage() failed because of overflow.
2177 */
2178 case VERR_REM_FLUSHED_PAGES_OVERFLOW:
2179 Assert((pCtx->ss & X86_SEL_RPL) != 1);
2180 REMR3ReplayInvalidatedPages(pVM);
2181 rc = VINF_SUCCESS;
2182 break;
2183
2184 /*
2185 * I/O Port access - emulate the instruction.
2186 */
2187 case VINF_IOM_HC_IOPORT_READ:
2188 case VINF_IOM_HC_IOPORT_WRITE:
2189 rc = emR3RawExecuteIOInstruction(pVM);
2190 break;
2191
2192 /*
2193 * Memory mapped I/O access - emulate the instruction.
2194 */
2195 case VINF_IOM_HC_MMIO_READ:
2196 case VINF_IOM_HC_MMIO_WRITE:
2197 case VINF_IOM_HC_MMIO_READ_WRITE:
2198 rc = emR3RawExecuteInstruction(pVM, "MMIO");
2199 break;
2200
2201 /*
2202 * Execute instruction.
2203 */
2204 case VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT:
2205 rc = emR3RawExecuteInstruction(pVM, "LDT FAULT: ");
2206 break;
2207 case VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT:
2208 rc = emR3RawExecuteInstruction(pVM, "GDT FAULT: ");
2209 break;
2210 case VINF_EM_RAW_EMULATE_INSTR_IDT_FAULT:
2211 rc = emR3RawExecuteInstruction(pVM, "IDT FAULT: ");
2212 break;
2213 case VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT:
2214 rc = emR3RawExecuteInstruction(pVM, "TSS FAULT: ");
2215 break;
2216 case VINF_EM_RAW_EMULATE_INSTR_PD_FAULT:
2217 rc = emR3RawExecuteInstruction(pVM, "PD FAULT: ");
2218 break;
2219
2220 case VINF_EM_RAW_EMULATE_INSTR_HLT:
2221 /** @todo skip instruction and go directly to the halt state. (see REM for implementation details) */
2222 rc = emR3RawPrivileged(pVM);
2223 break;
2224
2225 case VINF_PATM_PENDING_IRQ_AFTER_IRET:
2226 rc = emR3RawExecuteInstruction(pVM, "EMUL: ", VINF_PATM_PENDING_IRQ_AFTER_IRET);
2227 break;
2228
2229 case VINF_EM_RAW_EMULATE_INSTR:
2230 case VINF_PATCH_EMULATE_INSTR:
2231 rc = emR3RawExecuteInstruction(pVM, "EMUL: ");
2232 break;
2233
2234 /*
2235 * Stale selector and iret traps => REM.
2236 */
2237 case VINF_EM_RAW_STALE_SELECTOR:
2238 case VINF_EM_RAW_IRET_TRAP:
2239 /* We will not go to the recompiler if EIP points to patch code. */
2240 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
2241 {
2242 pCtx->eip = PATMR3PatchToGCPtr(pVM, (RTGCPTR)pCtx->eip, 0);
2243 }
2244 LogFlow(("emR3RawHandleRC: %Vrc -> %Vrc\n", rc, VINF_EM_RESCHEDULE_REM));
2245 rc = VINF_EM_RESCHEDULE_REM;
2246 break;
2247
2248 /*
2249 * Up a level.
2250 */
2251 case VINF_EM_TERMINATE:
2252 case VINF_EM_OFF:
2253 case VINF_EM_RESET:
2254 case VINF_EM_SUSPEND:
2255 case VINF_EM_HALT:
2256 case VINF_EM_RESUME:
2257 case VINF_EM_RESCHEDULE:
2258 case VINF_EM_RESCHEDULE_REM:
2259 break;
2260
2261 /*
2262 * Up a level and invoke the debugger.
2263 */
2264 case VINF_EM_DBG_STEPPED:
2265 case VINF_EM_DBG_BREAKPOINT:
2266 case VINF_EM_DBG_STEP:
2267 case VINF_EM_DBG_HYPER_ASSERTION:
2268 case VINF_EM_DBG_HYPER_BREAKPOINT:
2269 case VINF_EM_DBG_HYPER_STEPPED:
2270 case VINF_EM_DBG_STOP:
2271 break;
2272
2273 /*
2274 * Up a level, dump and debug.
2275 */
2276 case VERR_TRPM_DONT_PANIC:
2277 case VERR_TRPM_PANIC:
2278 break;
2279
2280 /*
2281 * Anything which is not known to us means an internal error
2282 * and the termination of the VM!
2283 */
2284 default:
2285 AssertMsgFailed(("Unknown GC return code: %Vra\n", rc));
2286 break;
2287 }
2288 return rc;
2289}
2290
2291/**
2292 * Check for pending raw actions
2293 *
2294 * @returns VBox status code.
2295 * @param pVM The VM to operate on.
2296 */
2297EMR3DECL(int) EMR3CheckRawForcedActions(PVM pVM)
2298{
2299 return emR3RawForcedActions(pVM, pVM->em.s.pCtx);
2300}
2301
2302
2303/**
2304 * Process raw-mode specific forced actions.
2305 *
2306 * This function is called when any FFs in the VM_FF_HIGH_PRIORITY_PRE_RAW_MASK is pending.
2307 *
2308 * @returns VBox status code.
2309 * Only the normal success/failure stuff, no VINF_EM_*.
2310 * @param pVM The VM handle.
2311 * @param pCtx The guest CPUM register context.
2312 */
2313static int emR3RawForcedActions(PVM pVM, PCPUMCTX pCtx)
2314{
2315 /*
2316 * Note that the order is *vitally* important!
2317 * Also note that SELMR3UpdateFromCPUM may trigger VM_FF_SELM_SYNC_TSS.
2318 */
2319
2320
2321 /*
2322 * Sync selector tables.
2323 */
2324 if (VM_FF_ISPENDING(pVM, VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT))
2325 {
2326 int rc = SELMR3UpdateFromCPUM(pVM);
2327 if (VBOX_FAILURE(rc))
2328 return rc;
2329 }
2330
2331 /*
2332 * Sync IDT.
2333 */
2334 if (VM_FF_ISSET(pVM, VM_FF_TRPM_SYNC_IDT))
2335 {
2336 int rc = TRPMR3SyncIDT(pVM);
2337 if (VBOX_FAILURE(rc))
2338 return rc;
2339 }
2340
2341 /*
2342 * Sync TSS.
2343 */
2344 if (VM_FF_ISSET(pVM, VM_FF_SELM_SYNC_TSS))
2345 {
2346 int rc = SELMR3SyncTSS(pVM);
2347 if (VBOX_FAILURE(rc))
2348 return rc;
2349 }
2350
2351 /*
2352 * Sync page directory.
2353 */
2354 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
2355 {
2356 int rc = PGMSyncCR3(pVM, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
2357 if (VBOX_FAILURE(rc))
2358 return rc;
2359
2360 Assert(!VM_FF_ISPENDING(pVM, VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT));
2361
2362 /* Prefetch pages for EIP and ESP */
2363 /** @todo This is rather expensive. Should investigate if it really helps at all. */
2364 rc = PGMPrefetchPage(pVM, SELMToFlat(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid, pCtx->eip));
2365 if (rc == VINF_SUCCESS)
2366 rc = PGMPrefetchPage(pVM, SELMToFlat(pVM, pCtx->eflags, pCtx->ss, &pCtx->ssHid, pCtx->esp));
2367 if (rc != VINF_SUCCESS)
2368 {
2369 if (rc != VINF_PGM_SYNC_CR3)
2370 return rc;
2371 rc = PGMSyncCR3(pVM, pCtx->cr0, pCtx->cr3, pCtx->cr4, VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
2372 if (VBOX_FAILURE(rc))
2373 return rc;
2374 }
2375 /** @todo maybe prefetch the supervisor stack page as well */
2376 }
2377
2378 /*
2379 * Allocate handy pages (just in case the above actions have consumed some pages).
2380 */
2381 if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
2382 {
2383 int rc = PGMR3PhysAllocateHandyPages(pVM);
2384 if (VBOX_FAILURE(rc))
2385 return rc;
2386 }
2387
2388 return VINF_SUCCESS;
2389}
2390
2391
2392/**
2393 * Executes raw code.
2394 *
2395 * This function contains the raw-mode version of the inner
2396 * execution loop (the outer loop being in EMR3ExecuteVM()).
2397 *
2398 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
2399 * VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
2400 *
2401 * @param pVM VM handle.
2402 * @param pfFFDone Where to store an indicator telling whether or not
2403 * FFs were done before returning.
2404 */
static int emR3RawExecute(PVM pVM, bool *pfFFDone)
{
    STAM_PROFILE_ADV_START(&pVM->em.s.StatRAWTotal, a);

    int rc = VERR_INTERNAL_ERROR;
    PCPUMCTX pCtx = pVM->em.s.pCtx;
    LogFlow(("emR3RawExecute: (cs:eip=%04x:%08x)\n", pCtx->cs, pCtx->eip));
    pVM->em.s.fForceRAW = false;        /* entering the raw loop consumes any pending force-RAW request. */
    *pfFFDone = false;


    /*
     *
     * Spin till we get a forced action or raw mode status code resulting in
     * in anything but VINF_SUCCESS or VINF_EM_RESCHEDULE_RAW.
     *
     */
    for (;;)
    {
        STAM_PROFILE_ADV_START(&pVM->em.s.StatRAWEntry, b);

        /*
         * Check various preconditions.
         */
#ifdef VBOX_STRICT
        Assert(REMR3QueryPendingInterrupt(pVM) == REM_NO_PENDING_IRQ);
        Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss & X86_SEL_RPL) == 3 || (pCtx->ss & X86_SEL_RPL) == 0);
        AssertMsg(   (pCtx->eflags.u32 & X86_EFL_IF)
                  || PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip),
                  ("Tried to execute code with IF at EIP=%08x!\n", pCtx->eip));
        if (    !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL)
            &&  PGMR3MapHasConflicts(pVM, pCtx->cr3, pVM->fRawR0Enabled))
        {
            /* NOTE(review): this early return skips the StatRAWTotal/StatRAWEntry profile
             * stops; strict-builds only, so the skewed statistics are tolerable. */
            AssertMsgFailed(("We should not get conflicts any longer!!!\n"));
            return VERR_INTERNAL_ERROR;
        }
#endif /* VBOX_STRICT */

        /*
         * Process high priority pre-execution raw-mode FFs.
         */
        if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            rc = emR3RawForcedActions(pVM, pCtx);
            if (VBOX_FAILURE(rc))
                break;
        }

        /*
         * If we're going to execute ring-0 code, the guest state needs to
         * be modified a bit and some of the state components (IF, SS/CS RPL,
         * and perhaps EIP) needs to be stored with PATM.
         */
        rc = CPUMRawEnter(pVM, NULL);
        if (rc != VINF_SUCCESS)
        {
            STAM_PROFILE_ADV_STOP(&pVM->em.s.StatRAWEntry, b);
            break;
        }

        /*
         * Scan code before executing it. Don't bother with user mode or V86 code
         */
        if (    (pCtx->ss & X86_SEL_RPL) <= 1
            &&  !pCtx->eflags.Bits.u1VM
            &&  !PATMIsPatchGCAddr(pVM, pCtx->eip))
        {
            STAM_PROFILE_ADV_SUSPEND(&pVM->em.s.StatRAWEntry, b);
            CSAMR3CheckCodeEx(pVM, pCtx->cs, &pCtx->csHid, pCtx->eip);
            STAM_PROFILE_ADV_RESUME(&pVM->em.s.StatRAWEntry, b);
        }

#ifdef LOG_ENABLED
        /*
         * Log important stuff before entering GC.
         */
        PPATMGCSTATE pGCState = PATMR3QueryGCStateHC(pVM);
        if (pCtx->eflags.Bits.u1VM)
            Log(("RV86: %04X:%08X IF=%d VMFlags=%x\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
        else if ((pCtx->ss & X86_SEL_RPL) == 1)
        {
            bool fCSAMScanned = CSAMIsPageScanned(pVM, (RTGCPTR)pCtx->eip);
            Log(("RR0: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss & X86_SEL_RPL), fCSAMScanned));
        }
        else if ((pCtx->ss & X86_SEL_RPL) == 3)
            Log(("RR3: %08X ESP=%08X IF=%d VMFlags=%x\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
#endif /* LOG_ENABLED */



        /*
         * Execute the code.
         */
        STAM_PROFILE_ADV_STOP(&pVM->em.s.StatRAWEntry, b);
        STAM_PROFILE_START(&pVM->em.s.StatRAWExec, c);
        VMMR3Unlock(pVM);
        rc = VMMR3RawRunGC(pVM);
        VMMR3Lock(pVM);
        STAM_PROFILE_STOP(&pVM->em.s.StatRAWExec, c);
        STAM_PROFILE_ADV_START(&pVM->em.s.StatRAWTail, d);

        /* Note: pGCState only exists when LOG_ENABLED is defined, which is fine
         * here because LogFlow compiles to nothing otherwise. */
        LogFlow(("RR0-E: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss & X86_SEL_RPL)));
        LogFlow(("VMMR3RawRunGC returned %Vrc\n", rc));

        /*
         * Restore the real CPU state and deal with high priority post
         * execution FFs before doing anything else.
         */
        rc = CPUMRawLeave(pVM, NULL, rc);
        VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
        if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK))
            rc = emR3HighPriorityPostForcedActions(pVM, rc);

#ifdef VBOX_STRICT
        /*
         * Assert TSS consistency & rc vs patch code.
         */
        if (    !VM_FF_ISPENDING(pVM, VM_FF_SELM_SYNC_TSS | VM_FF_SELM_SYNC_GDT) /* GDT implies TSS at the moment. */
            &&  EMIsRawRing0Enabled(pVM))
            SELMR3CheckTSS(pVM);
        switch (rc)
        {
            /* Status codes that are expected while patch code is active. */
            case VINF_SUCCESS:
            case VINF_EM_RAW_INTERRUPT:
            case VINF_PATM_PATCH_TRAP_PF:
            case VINF_PATM_PATCH_TRAP_GP:
            case VINF_PATM_PATCH_INT3:
            case VINF_PATM_CHECK_PATCH_PAGE:
            case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
            case VINF_EM_RAW_GUEST_TRAP:
            case VINF_EM_RESCHEDULE_RAW:
                break;

            default:
                /* Anything else interrupting patch code is worth logging (unless single stepping). */
                if (PATMIsPatchGCAddr(pVM, pCtx->eip) && !(pCtx->eflags.u32 & X86_EFL_TF))
                    LogIt(NULL, 0, LOG_GROUP_PATM, ("Patch code interrupted at %VGv for reason %Vrc\n", CPUMGetGuestEIP(pVM), rc));
                break;
        }
        /*
         * Let's go paranoid!
         */
        if (    !VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL)
            &&  PGMR3MapHasConflicts(pVM, pCtx->cr3, pVM->fRawR0Enabled))
        {
            AssertMsgFailed(("We should not get conflicts any longer!!!\n"));
            return VERR_INTERNAL_ERROR;
        }
#endif /* VBOX_STRICT */

        /*
         * Process the returned status code.
         */
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
        {
            /* EM status codes are handled by the outer loop. */
            STAM_PROFILE_ADV_STOP(&pVM->em.s.StatRAWTail, d);
            break;
        }
        rc = emR3RawHandleRC(pVM, pCtx, rc);
        if (rc != VINF_SUCCESS)
        {
            rc = emR3RawUpdateForceFlag(pVM, pCtx, rc);
            if (rc != VINF_SUCCESS)
            {
                STAM_PROFILE_ADV_STOP(&pVM->em.s.StatRAWTail, d);
                break;
            }
        }

        /*
         * Check and execute forced actions.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPoll(pVM);
#endif
        STAM_PROFILE_ADV_STOP(&pVM->em.s.StatRAWTail, d);
        if (VM_FF_ISPENDING(pVM, ~VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss & X86_SEL_RPL) != 1);

            STAM_PROFILE_ADV_SUSPEND(&pVM->em.s.StatRAWTotal, a);
            rc = emR3ForcedActions(pVM, rc);
            STAM_PROFILE_ADV_RESUME(&pVM->em.s.StatRAWTotal, a);
            if (    rc != VINF_SUCCESS
                &&  rc != VINF_EM_RESCHEDULE_RAW)
            {
                rc = emR3RawUpdateForceFlag(pVM, pCtx, rc);
                if (rc != VINF_SUCCESS)
                {
                    *pfFFDone = true;   /* tell the outer loop it need not redo the FFs. */
                    break;
                }
            }
        }
    }

    /*
     * Return to outer loop.
     */
#if defined(LOG_ENABLED) && defined(DEBUG)
    RTLogFlush(NULL);
#endif
    STAM_PROFILE_ADV_STOP(&pVM->em.s.StatRAWTotal, a);
    return rc;
}
2609
2610
2611/**
2612 * Executes hardware accelerated raw code. (Intel VMX & AMD SVM)
2613 *
2614 * This function contains the raw-mode version of the inner
2615 * execution loop (the outer loop being in EMR3ExecuteVM()).
2616 *
2617 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
2618 * VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
2619 *
2620 * @param pVM VM handle.
2621 * @param pfFFDone Where to store an indicator telling whether or not
2622 * FFs were done before returning.
2623 */
static int emR3HwAccExecute(PVM pVM, bool *pfFFDone)
{
    int rc = VERR_INTERNAL_ERROR;
    PCPUMCTX pCtx = pVM->em.s.pCtx;

    LogFlow(("emR3HwAccExecute: (cs:eip=%04x:%08x)\n", pCtx->cs, pCtx->eip));
    *pfFFDone = false;

    STAM_COUNTER_INC(&pVM->em.s.StatHwAccExecuteEntry);

    /*
     * Spin till we get a forced action which returns anything but VINF_SUCCESS.
     */
    for (;;)
    {
        STAM_PROFILE_ADV_START(&pVM->em.s.StatHwAccEntry, a);

        /*
         * Check various preconditions.
         */
        /* The VT-x/SVM path doesn't use the shadow GDT/LDT/IDT/TSS, so simply drop
         * any pending sync requests for them. */
        VM_FF_CLEAR(pVM, (VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_TSS));

        /*
         * Process high priority pre-execution raw-mode FFs.
         */
        if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            rc = emR3RawForcedActions(pVM, pCtx);
            if (VBOX_FAILURE(rc))
                break;
        }

#ifdef LOG_ENABLED
        /* NOTE(review): rc is reused here for a log-only query; that is harmless
         * because it is unconditionally overwritten by VMMR3HwAccRunGC below. */
        uint8_t u8Vector;

        rc = TRPMQueryTrapAll(pVM, &u8Vector, 0, 0, 0);
        if (rc == VINF_SUCCESS)
        {
            Log(("Pending hardware interrupt %d\n", u8Vector));
        }
        /*
         * Log important stuff before entering GC.
         */
        uint32_t cpl = CPUMGetGuestCPL(pVM, CPUMCTX2CORE(pCtx));

        if (pCtx->eflags.Bits.u1VM)
            Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
        else
            Log(("HWR%d: %08X ESP=%08X IF=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
#endif

        /*
         * Execute the code.
         */
        STAM_PROFILE_ADV_STOP(&pVM->em.s.StatHwAccEntry, a);
        STAM_PROFILE_START(&pVM->em.s.StatHwAccExec, x);
        VMMR3Unlock(pVM);
        rc = VMMR3HwAccRunGC(pVM);
        VMMR3Lock(pVM);
        STAM_PROFILE_STOP(&pVM->em.s.StatHwAccExec, x);

        /*
         * Deal with high priority post execution FFs before doing anything else.
         */
        VM_FF_CLEAR(pVM, VM_FF_RESUME_GUEST_MASK);
        if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK))
            rc = emR3HighPriorityPostForcedActions(pVM, rc);

        /*
         * Process the returned status code.
         */
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
            break;  /* EM status codes are handled by the outer loop. */

        rc = emR3RawHandleRC(pVM, pCtx, rc);
        if (rc != VINF_SUCCESS)
            break;

        /*
         * Check and execute forced actions.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPoll(pVM);
#endif
        if (VM_FF_ISPENDING(pVM, VM_FF_ALL_MASK))
        {
            rc = emR3ForcedActions(pVM, rc);
            if (    rc != VINF_SUCCESS
                &&  rc != VINF_EM_RESCHEDULE_HWACC)
            {
                *pfFFDone = true;   /* tell the outer loop it need not redo the FFs. */
                break;
            }
        }
    }
    /*
     * Return to outer loop.
     */
#if defined(LOG_ENABLED) && defined(DEBUG)
    RTLogFlush(NULL);
#endif
    return rc;
}
2727
2728
2729/**
2730 * Decides whether to execute RAW, HWACC or REM.
2731 *
2732 * @returns new EM state
2733 * @param pVM The VM.
2734 * @param pCtx The CPU context.
2735 */
inline EMSTATE emR3Reschedule(PVM pVM, PCPUMCTX pCtx)
{
    /*
     * When forcing raw-mode execution, things are simple.
     */
    if (pVM->em.s.fForceRAW)
        return EMSTATE_RAW;

    /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
    /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */
    /* !!! THIS MUST BE IN SYNC WITH remR3CanExecuteRaw !!! */

    X86EFLAGS EFlags = pCtx->eflags;
    if (HWACCMIsEnabled(pVM))
    {
        /* Hardware accelerated raw-mode:
         *
         * Typically only 32-bits protected mode, with paging enabled, code is allowed here.
         */
        if (HWACCMR3CanExecuteGuest(pVM, pCtx) == true)
            return EMSTATE_HWACC;

        /** @note Raw mode and hw accelerated mode are incompatible. The latter turns off monitoring features essential for raw mode! */
        return EMSTATE_REM;
    }

    /* Standard raw-mode:
     *
     * Here we only support 16 & 32 bits protected mode ring 3 code that has no IO privileges
     * or 32 bits protected mode ring 0 code
     *
     * The tests are ordered by the likelyhood of being true during normal execution.
     */
    if (EFlags.u32 & (X86_EFL_TF /* | HF_INHIBIT_IRQ_MASK*/))
    {
        /* Single stepping (trap flag) must be emulated by the recompiler. */
        Log2(("raw mode refused: EFlags=%#x\n", EFlags.u32));
        return EMSTATE_REM;
    }

#ifndef VBOX_RAW_V86
    if (EFlags.u32 & X86_EFL_VM) {
        /* Virtual 8086 mode is not handled by raw mode unless VBOX_RAW_V86 is defined. */
        Log2(("raw mode refused: VM_MASK\n"));
        return EMSTATE_REM;
    }
#endif

    /** @todo check up the X86_CR0_AM flag in respect to raw mode!!! We're probably not emulating it right! */
    uint32_t u32CR0 = pCtx->cr0;
    if ((u32CR0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
    {
        /* Raw mode requires both paging and protected mode to be enabled. */
        //Log2(("raw mode refused: %s%s%s\n", (u32CR0 & X86_CR0_PG) ? "" : " !PG", (u32CR0 & X86_CR0_PE) ? "" : " !PE", (u32CR0 & X86_CR0_AM) ? "" : " !AM"));
        return EMSTATE_REM;
    }

    if (pCtx->cr4 & X86_CR4_PAE)
    {
        uint32_t u32Dummy, u32Features;

        /* Guest has PAE enabled; only allow raw mode if the (virtual) CPU reports PAE support. */
        CPUMGetGuestCpuId(pVM, 1, &u32Dummy, &u32Dummy, &u32Dummy, &u32Features);
        if (!(u32Features & X86_CPUID_FEATURE_EDX_PAE))
            return EMSTATE_REM;
    }

    unsigned uSS = pCtx->ss;
    if (    pCtx->eflags.Bits.u1VM
        ||  (uSS & X86_SEL_RPL) == 3)
    {
        /* Ring-3 (or V86) guest code. */
        if (!EMIsRawRing3Enabled(pVM))
            return EMSTATE_REM;

        if (!(EFlags.u32 & X86_EFL_IF))
        {
            Log2(("raw mode refused: IF (RawR3)\n"));
            return EMSTATE_REM;
        }

        if (!(u32CR0 & X86_CR0_WP) && EMIsRawRing0Enabled(pVM))
        {
            Log2(("raw mode refused: CR0.WP + RawR0\n"));
            return EMSTATE_REM;
        }
    }
    else
    {
        /* Ring-0/1/2 guest code. */
        if (!EMIsRawRing0Enabled(pVM))
            return EMSTATE_REM;

        /* Only ring 0 supervisor code. */
        if ((uSS & X86_SEL_RPL) != 0)
        {
            Log2(("raw r0 mode refused: CPL %d\n", uSS & X86_SEL_RPL));
            return EMSTATE_REM;
        }

        // Let's start with pure 32 bits ring 0 code first
        /** @todo What's pure 32-bit mode? flat? */
        if (    !(pCtx->ssHid.Attr.n.u1DefBig)
            ||  !(pCtx->csHid.Attr.n.u1DefBig))
        {
            Log2(("raw r0 mode refused: SS/CS not 32bit\n"));
            return EMSTATE_REM;
        }

        /* Write protection muts be turned on, or else the guest can overwrite our hypervisor code and data. */
        if (!(u32CR0 & X86_CR0_WP))
        {
            Log2(("raw r0 mode refused: CR0.WP=0!\n"));
            return EMSTATE_REM;
        }

        if (PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip))
        {
            /* Patch code must always be executed raw; this overrides the IF/IOPL checks below. */
            Log2(("raw r0 mode forced: patch code\n"));
            return EMSTATE_RAW;
        }

#if !defined(VBOX_ALLOW_IF0) && !defined(VBOX_RUN_INTERRUPT_GATE_HANDLERS)
        if (!(EFlags.u32 & X86_EFL_IF))
        {
            ////Log2(("R0: IF=0 VIF=%d %08X\n", eip, pVMeflags));
            //Log2(("RR0: Interrupts turned off; fall back to emulation\n"));
            return EMSTATE_REM;
        }
#endif

        /** @todo still necessary??? */
        if (EFlags.Bits.u2IOPL != 0)
        {
            Log2(("raw r0 mode refused: IOPL %d\n", EFlags.Bits.u2IOPL));
            return EMSTATE_REM;
        }
    }

    /* All checks passed: raw mode it is. The A20 gate must be enabled here. */
    Assert(PGMPhysIsA20Enabled(pVM));
    return EMSTATE_RAW;
}
2872
2873
2874/**
2875 * Executes all high priority post execution force actions.
2876 *
2877 * @returns rc or a fatal status code.
2878 *
2879 * @param pVM VM handle.
2880 * @param rc The current rc.
2881 */
2882static int emR3HighPriorityPostForcedActions(PVM pVM, int rc)
2883{
2884 if (VM_FF_ISSET(pVM, VM_FF_PDM_CRITSECT))
2885 PDMR3CritSectFF(pVM);
2886
2887 if (VM_FF_ISSET(pVM, VM_FF_CSAM_PENDING_ACTION))
2888 CSAMR3DoPendingAction(pVM);
2889
2890 return rc;
2891}
2892
2893
2894/**
2895 * Executes all pending forced actions.
2896 *
2897 * Forced actions can cause execution delays and execution
2898 * rescheduling. The first we deal with using action priority, so
2899 * that for instance pending timers aren't scheduled and ran until
2900 * right before execution. The rescheduling we deal with using
2901 * return codes. The same goes for VM termination, only in that case
2902 * we exit everything.
2903 *
2904 * @returns VBox status code of equal or greater importance/severity than rc.
2905 * The most important ones are: VINF_EM_RESCHEDULE,
2906 * VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
2907 *
2908 * @param pVM VM handle.
2909 * @param rc The current rc.
2910 *
2911 */
static int emR3ForcedActions(PVM pVM, int rc)
{
#ifdef VBOX_STRICT
    int rcIrq = VINF_SUCCESS;
#endif
    STAM_PROFILE_START(&pVM->em.s.StatForcedActions, a);

    /* Merge rc2 into rc, keeping the most important (lowest positive EM) status.
     * A break inside the do/while only exits the macro, not the function. */
#define UPDATE_RC() \
        do { \
            AssertMsg(rc2 <= 0 || (rc2 >= VINF_EM_FIRST && rc2 <= VINF_EM_LAST), ("Invalid FF return code: %Vra\n", rc2)); \
            if (rc2 == VINF_SUCCESS || rc < VINF_SUCCESS) \
                break; \
            if (!rc || rc2 < rc) \
                rc = rc2; \
        } while (0)

    int rc2;

    /*
     * Post execution chunk first.
     */
    if (VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_POST_MASK))
    {
        /*
         * Termination request.
         */
        if (VM_FF_ISSET(pVM, VM_FF_TERMINATE))
        {
            Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
            STAM_PROFILE_STOP(&pVM->em.s.StatForcedActions, a);
            return VINF_EM_TERMINATE;
        }

        /*
         * Debugger Facility polling.
         */
        if (VM_FF_ISSET(pVM, VM_FF_DBGF))
        {
            rc2 = DBGFR3VMMForcedAction(pVM);
            UPDATE_RC();
        }

        /*
         * Postponed reset request.
         */
        if (VM_FF_ISSET(pVM, VM_FF_RESET))
        {
            rc2 = VMR3Reset(pVM);
            UPDATE_RC();
            VM_FF_CLEAR(pVM, VM_FF_RESET);
        }

        /*
         * CSAM page scanning.
         */
        if (VM_FF_ISSET(pVM, VM_FF_CSAM_SCAN_PAGE))
        {
            PCPUMCTX pCtx = pVM->em.s.pCtx;

            /** @todo: check for 16 or 32 bits code! (D bit in the code selector) */
            Log(("Forced action VM_FF_CSAM_SCAN_PAGE\n"));

            CSAMR3CheckCodeEx(pVM, pCtx->cs, &pCtx->csHid, pCtx->eip);
            VM_FF_CLEAR(pVM, VM_FF_CSAM_SCAN_PAGE);
        }

        /* check that we got them all */
        Assert(!(VM_FF_NORMAL_PRIORITY_POST_MASK & ~(VM_FF_TERMINATE | VM_FF_DBGF | VM_FF_RESET | VM_FF_CSAM_SCAN_PAGE)));
    }

    /*
     * Normal priority then.
     * (Executed in no particular order.)
     */
    if (VM_FF_ISPENDING(pVM, VM_FF_NORMAL_PRIORITY_MASK))
    {
        /*
         * PDM Queues are pending.
         */
        if (VM_FF_ISSET(pVM, VM_FF_PDM_QUEUES))
            PDMR3QueueFlushAll(pVM);

        /*
         * PDM DMA transfers are pending.
         */
        if (VM_FF_ISSET(pVM, VM_FF_PDM_DMA))
            PDMR3DmaRun(pVM);

        /*
         * Requests from other threads.
         */
        if (VM_FF_ISSET(pVM, VM_FF_REQUEST))
        {
            rc2 = VMR3ReqProcessU(pVM->pUVM);
            if (rc2 == VINF_EM_OFF || rc2 == VINF_EM_TERMINATE)
            {
                /* Power-off/terminate must be returned immediately, not merged. */
                Log2(("emR3ForcedActions: returns %Vrc\n", rc2));
                STAM_PROFILE_STOP(&pVM->em.s.StatForcedActions, a);
                return rc2;
            }
            UPDATE_RC();
        }

        /* check that we got them all */
        Assert(!(VM_FF_NORMAL_PRIORITY_MASK & ~(VM_FF_REQUEST | VM_FF_PDM_QUEUES | VM_FF_PDM_DMA)));
    }

    /*
     * Execute polling function ever so often.
     * THIS IS A HACK, IT WILL BE *REPLACED* BY PROPER ASYNC NETWORKING SOON!
     */
    static unsigned cLast = 0;
    if (!((++cLast) % 4))
        PDMR3Poll(pVM);

    /*
     * High priority pre execution chunk last.
     * (Executed in ascending priority order.)
     */
    if (VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_MASK))
    {
        /*
         * Timers before interrupts.
         */
        if (VM_FF_ISSET(pVM, VM_FF_TIMER))
            TMR3TimerQueuesDo(pVM);

        /*
         * The instruction following an emulated STI should *always* be executed!
         */
        if (VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
        {
            Log(("VM_FF_EMULATED_STI at %VGv successor %VGv\n", CPUMGetGuestEIP(pVM), EMGetInhibitInterruptsPC(pVM)));
            if (CPUMGetGuestEIP(pVM) != EMGetInhibitInterruptsPC(pVM))
            {
                /** @note we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here if the eip is the same as the inhibited instr address.
                 *  Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
                 *  force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
                 *  break the guest. Sounds very unlikely, but such timing sensitive problem are not as rare as you might think.
                 */
                VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
            }
            if (HWACCMR3IsActive(pVM))
                rc2 = VINF_EM_RESCHEDULE_HWACC;
            else
                rc2 = PATMAreInterruptsEnabled(pVM) ? VINF_EM_RESCHEDULE_RAW : VINF_EM_RESCHEDULE_REM;

            UPDATE_RC();
        }

        /*
         * Interrupts.
         */
        if (    !VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS)
            &&  (!rc || rc >= VINF_EM_RESCHEDULE_RAW)
            &&  !TRPMHasTrap(pVM) /* an interrupt could already be scheduled for dispatching in the recompiler. */
            &&  PATMAreInterruptsEnabled(pVM)
            &&  !HWACCMR3IsEventPending(pVM))
        {
            if (VM_FF_ISPENDING(pVM, VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC))
            {
                /** @note it's important to make sure the return code from TRPMR3InjectEvent isn't ignored! */
                /** @todo this really isn't nice, should properly handle this */
                rc2 = TRPMR3InjectEvent(pVM, TRPM_HARDWARE_INT);
#ifdef VBOX_STRICT
                rcIrq = rc2;
#endif
                UPDATE_RC();
            }
            /** @todo really ugly; if we entered the hlt state when exiting the recompiler and an interrupt was pending, we previously got stuck in the halted state. */
            else if (REMR3QueryPendingInterrupt(pVM) != REM_NO_PENDING_IRQ)
            {
                rc2 = VINF_EM_RESCHEDULE_REM;
                UPDATE_RC();
            }
        }

        /*
         * Allocate handy pages.
         */
        if (VM_FF_ISSET(pVM, VM_FF_PGM_NEED_HANDY_PAGES))
        {
            rc2 = PGMR3PhysAllocateHandyPages(pVM);
            UPDATE_RC();
        }

        /*
         * Debugger Facility request.
         */
        if (VM_FF_ISSET(pVM, VM_FF_DBGF))
        {
            rc2 = DBGFR3VMMForcedAction(pVM);
            UPDATE_RC();
        }

        /*
         * Termination request.
         */
        if (VM_FF_ISSET(pVM, VM_FF_TERMINATE))
        {
            Log2(("emR3ForcedActions: returns VINF_EM_TERMINATE\n"));
            STAM_PROFILE_STOP(&pVM->em.s.StatForcedActions, a);
            return VINF_EM_TERMINATE;
        }

#ifdef DEBUG
        /*
         * Debug, pause the VM.
         */
        if (VM_FF_ISSET(pVM, VM_FF_DEBUG_SUSPEND))
        {
            VM_FF_CLEAR(pVM, VM_FF_DEBUG_SUSPEND);
            Log(("emR3ForcedActions: returns VINF_EM_SUSPEND\n"));
            return VINF_EM_SUSPEND;
        }

#endif
        /* check that we got them all */
        Assert(!(VM_FF_HIGH_PRIORITY_PRE_MASK & ~(VM_FF_TIMER | VM_FF_INTERRUPT_APIC | VM_FF_INTERRUPT_PIC | VM_FF_DBGF | VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL | VM_FF_SELM_SYNC_TSS | VM_FF_TRPM_SYNC_IDT | VM_FF_SELM_SYNC_GDT | VM_FF_SELM_SYNC_LDT | VM_FF_TERMINATE | VM_FF_DEBUG_SUSPEND | VM_FF_INHIBIT_INTERRUPTS | VM_FF_PGM_NEED_HANDY_PAGES)));
    }

#undef UPDATE_RC
    Log2(("emR3ForcedActions: returns %Vrc\n", rc));
    STAM_PROFILE_STOP(&pVM->em.s.StatForcedActions, a);
    Assert(rcIrq == VINF_SUCCESS || rcIrq == rc);
    return rc;
}
3139
3140
3141/**
3142 * Execute VM.
3143 *
3144 * This function is the main loop of the VM. The emulation thread
3145 * calls this function when the VM has been successfully constructed
3146 * and we're ready for executing the VM.
3147 *
3148 * Returning from this function means that the VM is turned off or
3149 * suspended (state already saved) and deconstruction in next in line.
3150 *
3151 * All interaction from other thread are done using forced actions
3152 * and signaling of the wait object.
3153 *
3154 * @returns VBox status code.
3155 * @param pVM The VM to operate on.
3156 */
3157EMR3DECL(int) EMR3ExecuteVM(PVM pVM)
3158{
3159 LogFlow(("EMR3ExecuteVM: pVM=%p enmVMState=%d enmState=%d (%s) fForceRAW=%d\n", pVM, pVM->enmVMState,
3160 pVM->em.s.enmState, EMR3GetStateName(pVM->em.s.enmState), pVM->em.s.fForceRAW));
3161 VM_ASSERT_EMT(pVM);
3162 Assert(pVM->em.s.enmState == EMSTATE_NONE || pVM->em.s.enmState == EMSTATE_SUSPENDED);
3163
3164 VMMR3Lock(pVM);
3165
3166 int rc = setjmp(pVM->em.s.u.FatalLongJump);
3167 if (rc == 0)
3168 {
3169 /*
3170 * Start the virtual time.
3171 */
3172 rc = TMVirtualResume(pVM);
3173 Assert(rc == VINF_SUCCESS);
3174 rc = TMCpuTickResume(pVM);
3175 Assert(rc == VINF_SUCCESS);
3176
3177 /*
3178 * The Outer Main Loop.
3179 */
3180 bool fFFDone = false;
3181 rc = VINF_EM_RESCHEDULE;
3182 pVM->em.s.enmState = EMSTATE_REM;
3183 STAM_REL_PROFILE_ADV_START(&pVM->em.s.StatTotal, x);
3184 for (;;)
3185 {
3186 /*
3187 * Before we can schedule anything (we're here because
3188 * scheduling is required) we must service any pending
3189 * forced actions to avoid any pending action causing
3190 * immidate rescheduling upon entering an inner loop
3191 *
3192 * Do forced actions.
3193 */
3194 if ( !fFFDone
3195 && rc != VINF_EM_TERMINATE
3196 && rc != VINF_EM_OFF
3197 && VM_FF_ISPENDING(pVM, VM_FF_ALL_BUT_RAW_MASK))
3198 {
3199 rc = emR3ForcedActions(pVM, rc);
3200 if ( ( rc == VINF_EM_RESCHEDULE_REM
3201 || rc == VINF_EM_RESCHEDULE_HWACC)
3202 && pVM->em.s.fForceRAW)
3203 rc = VINF_EM_RESCHEDULE_RAW;
3204 }
3205 else if (fFFDone)
3206 fFFDone = false;
3207
3208 /*
3209 * Now what to do?
3210 */
3211 Log2(("EMR3ExecuteVM: rc=%Vrc\n", rc));
3212 switch (rc)
3213 {
3214 /*
3215 * Keep doing what we're currently doing.
3216 */
3217 case VINF_SUCCESS:
3218 break;
3219
3220 /*
3221 * Reschedule - to raw-mode execution.
3222 */
3223 case VINF_EM_RESCHEDULE_RAW:
3224 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_RAW: %d -> %d (EMSTATE_RAW)\n", pVM->em.s.enmState, EMSTATE_RAW));
3225 pVM->em.s.enmState = EMSTATE_RAW;
3226 break;
3227
3228 /*
3229 * Reschedule - to hardware accelerated raw-mode execution.
3230 */
3231 case VINF_EM_RESCHEDULE_HWACC:
3232 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_HWACC: %d -> %d (EMSTATE_HWACC)\n", pVM->em.s.enmState, EMSTATE_HWACC));
3233 Assert(!pVM->em.s.fForceRAW);
3234 pVM->em.s.enmState = EMSTATE_HWACC;
3235 break;
3236
3237 /*
3238 * Reschedule - to recompiled execution.
3239 */
3240 case VINF_EM_RESCHEDULE_REM:
3241 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE_REM: %d -> %d (EMSTATE_REM)\n", pVM->em.s.enmState, EMSTATE_REM));
3242 pVM->em.s.enmState = EMSTATE_REM;
3243 break;
3244
3245 /*
3246 * Resume.
3247 */
3248 case VINF_EM_RESUME:
3249 Log2(("EMR3ExecuteVM: VINF_EM_RESUME: %d -> VINF_EM_RESCHEDULE\n", pVM->em.s.enmState));
3250 /* fall through and get scheduled. */
3251
3252 /*
3253 * Reschedule.
3254 */
3255 case VINF_EM_RESCHEDULE:
3256 {
3257 EMSTATE enmState = emR3Reschedule(pVM, pVM->em.s.pCtx);
3258 Log2(("EMR3ExecuteVM: VINF_EM_RESCHEDULE: %d -> %d (%s)\n", pVM->em.s.enmState, enmState, EMR3GetStateName(enmState)));
3259 pVM->em.s.enmState = enmState;
3260 break;
3261 }
3262
3263 /*
3264 * Halted.
3265 */
3266 case VINF_EM_HALT:
3267 Log2(("EMR3ExecuteVM: VINF_EM_HALT: %d -> %d\n", pVM->em.s.enmState, EMSTATE_HALTED));
3268 pVM->em.s.enmState = EMSTATE_HALTED;
3269 break;
3270
3271 /*
3272 * Suspend.
3273 */
3274 case VINF_EM_SUSPEND:
3275 Log2(("EMR3ExecuteVM: VINF_EM_SUSPEND: %d -> %d\n", pVM->em.s.enmState, EMSTATE_SUSPENDED));
3276 pVM->em.s.enmState = EMSTATE_SUSPENDED;
3277 break;
3278
3279 /*
3280 * Reset.
3281 * We might end up doing a double reset for now, we'll have to clean up the mess later.
3282 */
3283 case VINF_EM_RESET:
3284 Log2(("EMR3ExecuteVM: VINF_EM_RESET: %d -> %d\n", pVM->em.s.enmState, EMSTATE_REM));
3285 pVM->em.s.enmState = EMSTATE_REM;
3286 break;
3287
3288 /*
3289 * Power Off.
3290 */
3291 case VINF_EM_OFF:
3292 pVM->em.s.enmState = EMSTATE_TERMINATING;
3293 Log2(("EMR3ExecuteVM: returns VINF_EM_OFF (%d -> %d)\n", pVM->em.s.enmState, EMSTATE_TERMINATING));
3294 TMVirtualPause(pVM);
3295 TMCpuTickPause(pVM);
3296 VMMR3Unlock(pVM);
3297 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3298 return rc;
3299
3300 /*
3301 * Terminate the VM.
3302 */
3303 case VINF_EM_TERMINATE:
3304 pVM->em.s.enmState = EMSTATE_TERMINATING;
3305 Log(("EMR3ExecuteVM returns VINF_EM_TERMINATE (%d -> %d)\n", pVM->em.s.enmState, EMSTATE_TERMINATING));
3306 TMVirtualPause(pVM);
3307 TMCpuTickPause(pVM);
3308 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3309 return rc;
3310
3311 /*
3312 * Guest debug events.
3313 */
3314 case VINF_EM_DBG_STEPPED:
3315 AssertMsgFailed(("VINF_EM_DBG_STEPPED cannot be here!"));
3316 case VINF_EM_DBG_STOP:
3317 case VINF_EM_DBG_BREAKPOINT:
3318 case VINF_EM_DBG_STEP:
3319 if (pVM->em.s.enmState == EMSTATE_RAW)
3320 {
3321 Log2(("EMR3ExecuteVM: %Vrc: %d -> %d\n", rc, pVM->em.s.enmState, EMSTATE_DEBUG_GUEST_RAW));
3322 pVM->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
3323 }
3324 else
3325 {
3326 Log2(("EMR3ExecuteVM: %Vrc: %d -> %d\n", rc, pVM->em.s.enmState, EMSTATE_DEBUG_GUEST_REM));
3327 pVM->em.s.enmState = EMSTATE_DEBUG_GUEST_REM;
3328 }
3329 break;
3330
3331 /*
3332 * Hypervisor debug events.
3333 */
3334 case VINF_EM_DBG_HYPER_STEPPED:
3335 case VINF_EM_DBG_HYPER_BREAKPOINT:
3336 case VINF_EM_DBG_HYPER_ASSERTION:
3337 Log2(("EMR3ExecuteVM: %Vrc: %d -> %d\n", rc, pVM->em.s.enmState, EMSTATE_DEBUG_HYPER));
3338 pVM->em.s.enmState = EMSTATE_DEBUG_HYPER;
3339 break;
3340
3341 /*
3342 * Any error code showing up here other than the ones we
3343 * know and process above are considered to be FATAL.
3344 *
3345 * Unknown warnings and informational status codes are also
3346 * included in this.
3347 */
3348 default:
3349 if (VBOX_SUCCESS(rc))
3350 {
3351 AssertMsgFailed(("Unexpected warning or informational status code %Vra!\n", rc));
3352 rc = VERR_EM_INTERNAL_ERROR;
3353 }
3354 pVM->em.s.enmState = EMSTATE_GURU_MEDITATION;
3355 Log(("EMR3ExecuteVM returns %d\n", rc));
3356 break;
3357 }
3358
3359
3360 /*
3361 * Any waiters can now be woken up
3362 */
3363 VMMR3Unlock(pVM);
3364 VMMR3Lock(pVM);
3365
3366 STAM_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x); /* (skip this in release) */
3367 STAM_PROFILE_ADV_START(&pVM->em.s.StatTotal, x);
3368
3369 /*
3370 * Act on the state.
3371 */
3372 switch (pVM->em.s.enmState)
3373 {
3374 /*
3375 * Execute raw.
3376 */
3377 case EMSTATE_RAW:
3378 rc = emR3RawExecute(pVM, &fFFDone);
3379 break;
3380
3381 /*
3382 * Execute hardware accelerated raw.
3383 */
3384 case EMSTATE_HWACC:
3385 rc = emR3HwAccExecute(pVM, &fFFDone);
3386 break;
3387
3388 /*
3389 * Execute recompiled.
3390 */
3391 case EMSTATE_REM:
3392 rc = emR3RemExecute(pVM, &fFFDone);
3393 Log2(("EMR3ExecuteVM: emR3RemExecute -> %Vrc\n", rc));
3394 break;
3395
3396 /*
3397 * hlt - execution halted until interrupt.
3398 */
3399 case EMSTATE_HALTED:
3400 {
3401 STAM_REL_PROFILE_START(&pVM->em.s.StatHalted, y);
3402 rc = VMR3WaitHalted(pVM, !(CPUMGetGuestEFlags(pVM) & X86_EFL_IF));
3403 STAM_REL_PROFILE_STOP(&pVM->em.s.StatHalted, y);
3404 break;
3405 }
3406
3407 /*
3408 * Suspended - return to VM.cpp.
3409 */
3410 case EMSTATE_SUSPENDED:
3411 TMVirtualPause(pVM);
3412 TMCpuTickPause(pVM);
3413 VMMR3Unlock(pVM);
3414 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3415 return VINF_EM_SUSPEND;
3416
3417 /*
3418 * Debugging in the guest.
3419 */
3420 case EMSTATE_DEBUG_GUEST_REM:
3421 case EMSTATE_DEBUG_GUEST_RAW:
3422 TMVirtualPause(pVM);
3423 TMCpuTickPause(pVM);
3424 rc = emR3Debug(pVM, rc);
3425 TMVirtualResume(pVM);
3426 TMCpuTickResume(pVM);
3427 Log2(("EMR3ExecuteVM: enmr3Debug -> %Vrc (state %d)\n", rc, pVM->em.s.enmState));
3428 break;
3429
3430 /*
3431 * Debugging in the hypervisor.
3432 */
3433 case EMSTATE_DEBUG_HYPER:
3434 {
3435 TMVirtualPause(pVM);
3436 TMCpuTickPause(pVM);
3437 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3438
3439 rc = emR3Debug(pVM, rc);
3440 Log2(("EMR3ExecuteVM: enmr3Debug -> %Vrc (state %d)\n", rc, pVM->em.s.enmState));
3441 if (rc != VINF_SUCCESS)
3442 {
3443 /* switch to guru meditation mode */
3444 pVM->em.s.enmState = EMSTATE_GURU_MEDITATION;
3445 VMMR3FatalDump(pVM, rc);
3446 return rc;
3447 }
3448
3449 STAM_REL_PROFILE_ADV_START(&pVM->em.s.StatTotal, x);
3450 TMVirtualResume(pVM);
3451 TMCpuTickResume(pVM);
3452 break;
3453 }
3454
3455 /*
3456 * Guru meditation takes place in the debugger.
3457 */
3458 case EMSTATE_GURU_MEDITATION:
3459 {
3460 TMVirtualPause(pVM);
3461 TMCpuTickPause(pVM);
3462 VMMR3FatalDump(pVM, rc);
3463 emR3Debug(pVM, rc);
3464 VMMR3Unlock(pVM);
3465 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3466 return rc;
3467 }
3468
3469 /*
3470 * The states we don't expect here.
3471 */
3472 case EMSTATE_NONE:
3473 case EMSTATE_TERMINATING:
3474 default:
3475 AssertMsgFailed(("EMR3ExecuteVM: Invalid state %d!\n", pVM->em.s.enmState));
3476 pVM->em.s.enmState = EMSTATE_GURU_MEDITATION;
3477 TMVirtualPause(pVM);
3478 TMCpuTickPause(pVM);
3479 VMMR3Unlock(pVM);
3480 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3481 return VERR_EM_INTERNAL_ERROR;
3482 }
3483 } /* The Outer Main Loop */
3484 }
3485 else
3486 {
3487 /*
3488 * Fatal error.
3489 */
3490 LogFlow(("EMR3ExecuteVM: returns %Vrc (longjmp / fatal error)\n", rc));
3491 TMVirtualPause(pVM);
3492 TMCpuTickPause(pVM);
3493 VMMR3FatalDump(pVM, rc);
3494 emR3Debug(pVM, rc);
3495 VMMR3Unlock(pVM);
3496 STAM_REL_PROFILE_ADV_STOP(&pVM->em.s.StatTotal, x);
3497 /** @todo change the VM state! */
3498 return rc;
3499 }
3500
3501 /* (won't ever get here). */
3502 AssertFailed();
3503}
3504
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette