VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EMHM.cpp@72749

Last change on this file since 72749 was 72749, checked in by vboxsync, 6 years ago

VMM: Remove EM_NOTIFY_HM and related code unused since VirtualBox 2.1 or earlier.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 20.1 KB
/* $Id: EMHM.cpp 72749 2018-06-29 07:57:05Z vboxsync $ */
/** @file
 * EM - Execution Monitor / Manager - hardware virtualization
 */

/*
 * Copyright (C) 2006-2017 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_EM
#define VMCPU_INCL_CPUM_GST_CTX
#include <VBox/vmm/em.h>
#include <VBox/vmm/vmm.h>
#include <VBox/vmm/csam.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/trpm.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/pgm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#include <VBox/vmm/tm.h>
#include <VBox/vmm/mm.h>
#include <VBox/vmm/ssm.h>
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/pdmcritsect.h>
#include <VBox/vmm/pdmqueue.h>
#include <VBox/vmm/hm.h>
#include "EMInternal.h"
#include <VBox/vmm/vm.h>
#include <VBox/vmm/gim.h>
#include <VBox/vmm/cpumdis.h>
#include <VBox/dis.h>
#include <VBox/disopcode.h>
#include <VBox/vmm/dbgf.h>
#include "VMMTracing.h"

#include <iprt/asm.h>



/*********************************************************************************************************************************
*   Internal Functions                                                                                                           *
*********************************************************************************************************************************/
static int      emR3HmHandleRC(PVM pVM, PVMCPU pVCpu, int rc);
DECLINLINE(int) emR3HmExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
static int      emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu);
static int      emR3HmForcedActions(PVM pVM, PVMCPU pVCpu);

#define EMHANDLERC_WITH_HM
#define emR3ExecuteInstruction   emR3HmExecuteInstruction
#define emR3ExecuteIOInstruction emR3HmExecuteIOInstruction
#include "EMHandleRCTmpl.h"

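/*
 * Note on the template include above (illustrative gloss, not authoritative):
 * EMHandleRCTmpl.h appears to be a shared status-code handler template.  With
 * EMHANDLERC_WITH_HM defined and the two function names remapped by the
 * #defines, its expansion is expected to provide the body of emR3HmHandleRC()
 * declared in the internal-functions block above, specialised for
 * hardware-assisted (HM) execution.
 */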

/**
 * Executes instruction in HM mode if we can.
 *
 * This is somewhat comparable to REMR3EmulateInstruction.
 *
 * @returns VBox strict status code.
 * @retval  VINF_EM_DBG_STEPPED on success.
 * @retval  VERR_EM_CANNOT_EXEC_GUEST if we cannot execute guest instructions in
 *          HM right now.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure for the calling EMT.
 * @param   fFlags  Combinations of EM_ONE_INS_FLAGS_XXX.
 * @thread  EMT.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) EMR3HmSingleInstruction(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
{
    Assert(!(fFlags & ~EM_ONE_INS_FLAGS_MASK));

    if (!HMR3CanExecuteGuest(pVM, &pVCpu->cpum.GstCtx))
        return VINF_EM_RESCHEDULE;

    uint64_t const uOldRip = pVCpu->cpum.GstCtx.rip;
    for (;;)
    {
        /*
         * Service necessary FFs before going into HM.
         */
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            VBOXSTRICTRC rcStrict = emR3HmForcedActions(pVM, pVCpu);
            if (rcStrict != VINF_SUCCESS)
            {
                Log(("EMR3HmSingleInstruction: FFs before -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                return rcStrict;
            }
        }

        /*
         * Go execute it.
         */
        bool fOld = HMSetSingleInstruction(pVM, pVCpu, true);
        VBOXSTRICTRC rcStrict = VMMR3HmRunGC(pVM, pVCpu);
        HMSetSingleInstruction(pVM, pVCpu, fOld);
        LogFlow(("EMR3HmSingleInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));

        /*
         * Handle high priority FFs and informational status codes.  We don't do
         * normal FF processing; the caller or the next call can deal with them.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
        {
            rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, rcStrict);
            LogFlow(("EMR3HmSingleInstruction: FFs after -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
        }

        if (rcStrict != VINF_SUCCESS && (rcStrict < VINF_EM_FIRST || rcStrict > VINF_EM_LAST))
        {
            rcStrict = emR3HmHandleRC(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
            Log(("EMR3HmSingleInstruction: emR3HmHandleRC -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
        }

        /*
         * Done?
         */
        if (   (rcStrict != VINF_SUCCESS && rcStrict != VINF_EM_DBG_STEPPED)
            || !(fFlags & EM_ONE_INS_FLAGS_RIP_CHANGE)
            || pVCpu->cpum.GstCtx.rip != uOldRip)
        {
            if (rcStrict == VINF_SUCCESS && pVCpu->cpum.GstCtx.rip != uOldRip)
                rcStrict = VINF_EM_DBG_STEPPED;
            Log(("EMR3HmSingleInstruction: returns %Rrc (rip %llx -> %llx)\n", VBOXSTRICTRC_VAL(rcStrict), uOldRip, pVCpu->cpum.GstCtx.rip));
            CPUM_IMPORT_EXTRN_RET(pVCpu, ~CPUMCTX_EXTRN_KEEPER_MASK);
            return rcStrict;
        }
    }
}
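/*
 * Minimal usage sketch (hypothetical caller, not part of this file): a
 * debugger-style single step on the EMT owning pVCpu might look roughly like
 * this, assuming pVM and pVCpu are valid and HM can currently run the guest:
 *
 *     VBOXSTRICTRC rcStep = EMR3HmSingleInstruction(pVM, pVCpu, EM_ONE_INS_FLAGS_RIP_CHANGE);
 *     if (rcStep == VINF_EM_DBG_STEPPED)
 *         LogFlow(("single step: rip advanced\n"));       // stepped one instruction
 *     else if (rcStep == VINF_EM_RESCHEDULE)
 *         LogFlow(("single step: HM unavailable\n"));     // use another execution engine
 */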


/**
 * Executes one (or perhaps a few more) instruction(s).
 *
 * @returns VBox status code suitable for EM.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   rcRC        Return code from RC.
 * @param   pszPrefix   Disassembly prefix. If not NULL we'll disassemble the
 *                      instruction and prefix the log output with this text.
 */
#if defined(LOG_ENABLED) || defined(DOXYGEN_RUNNING)
static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC, const char *pszPrefix)
#else
static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
#endif
{
    NOREF(rcRC);

#ifdef LOG_ENABLED
    /*
     * Log it.
     */
    Log(("EMINS: %04x:%RGv RSP=%RGv\n", pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip, (RTGCPTR)pVCpu->cpum.GstCtx.rsp));
    if (pszPrefix)
    {
        DBGFR3_INFO_LOG(pVM, pVCpu, "cpumguest", pszPrefix);
        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix);
    }
#endif

    /*
     * Use IEM and fall back on REM if the functionality is missing.
     * Once IEM gets mature enough, nothing should ever fall back.
     */
    STAM_PROFILE_START(&pVCpu->em.s.StatIEMEmu, a);
    VBOXSTRICTRC rcStrict;
    uint32_t     idxContinueExitRec = pVCpu->em.s.idxContinueExitRec;
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    if (idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
    {
        CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        rcStrict = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu));
    }
    else
    {
        RT_UNTRUSTED_VALIDATED_FENCE();
        rcStrict = EMHistoryExec(pVCpu, &pVCpu->em.s.aExitRecords[idxContinueExitRec], 0);
        LogFlow(("emR3HmExecuteInstruction: %Rrc (EMHistoryExec)\n", VBOXSTRICTRC_VAL(rcStrict)));
    }
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMEmu, a);

    if (   rcStrict == VERR_IEM_ASPECT_NOT_IMPLEMENTED
        || rcStrict == VERR_IEM_INSTR_NOT_IMPLEMENTED)
    {
#ifdef VBOX_WITH_REM
        STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, b);
        EMRemLock(pVM);
        /* Flush the recompiler TLB if the VCPU has changed. */
        if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
            CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
        pVM->em.s.idLastRemCpu = pVCpu->idCpu;

        rcStrict = REMR3EmulateInstruction(pVM, pVCpu);
        EMRemUnlock(pVM);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, b);
#else  /* !VBOX_WITH_REM */
        NOREF(pVM);
#endif /* !VBOX_WITH_REM */
    }

    return VBOXSTRICTRC_TODO(rcStrict);
}


/**
 * Executes one (or perhaps a few more) instruction(s).
 * This is just a wrapper for discarding pszPrefix in non-logging builds.
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pszPrefix   Disassembly prefix. If not NULL we'll disassemble the
 *                      instruction and prefix the log output with this text.
 * @param   rcGC        GC return code
 */
DECLINLINE(int) emR3HmExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
{
#ifdef LOG_ENABLED
    return emR3HmExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
#else
    RT_NOREF_PV(pszPrefix);
    return emR3HmExecuteInstructionWorker(pVM, pVCpu, rcGC);
#endif
}

/**
 * Executes one (or perhaps a few more) I/O instruction(s).
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static int emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
{
    STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);

    VBOXSTRICTRC rcStrict;
    uint32_t     idxContinueExitRec = pVCpu->em.s.idxContinueExitRec;
    RT_UNTRUSTED_NONVOLATILE_COPY_FENCE();
    if (idxContinueExitRec >= RT_ELEMENTS(pVCpu->em.s.aExitRecords))
    {
        /*
         * Try to restart the I/O instruction that was refused in ring-0.
         */
        rcStrict = HMR3RestartPendingIOInstr(pVM, pVCpu, &pVCpu->cpum.GstCtx);
        if (IOM_SUCCESS(rcStrict))
        {
            STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoRestarted);
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            return VBOXSTRICTRC_TODO(rcStrict);     /* rip already updated. */
        }
        AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
                        RT_SUCCESS_NP(rcStrict) ? VERR_IPE_UNEXPECTED_INFO_STATUS : VBOXSTRICTRC_TODO(rcStrict));

        /*
         * Hand it over to the interpreter.
         */
        CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        rcStrict = IEMExecOne(pVCpu);
        LogFlow(("emR3HmExecuteIOInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    }
    else
    {
        RT_UNTRUSTED_VALIDATED_FENCE();
        CPUM_IMPORT_EXTRN_RET(pVCpu, IEM_CPUMCTX_EXTRN_MUST_MASK);
        Assert(!HMR3HasPendingIOInstr(pVCpu));
        rcStrict = EMHistoryExec(pVCpu, &pVCpu->em.s.aExitRecords[idxContinueExitRec], 0);
        LogFlow(("emR3HmExecuteIOInstruction: %Rrc (EMHistoryExec)\n", VBOXSTRICTRC_VAL(rcStrict)));
        STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoRestarted);
    }

    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoIem);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
    return VBOXSTRICTRC_TODO(rcStrict);
}


/**
 * Process HM specific forced actions.
 *
 * This function is called when any FFs in the VM_FF_HIGH_PRIORITY_PRE_RAW_MASK
 * and/or VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK are pending.
 *
 * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
 *          EM statuses.
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 */
static int emR3HmForcedActions(PVM pVM, PVMCPU pVCpu)
{
    /*
     * Sync page directory.
     */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_CR0 | CPUMCTX_EXTRN_CR3 | CPUMCTX_EXTRN_CR4);
        Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
        int rc = PGMSyncCR3(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
        if (RT_FAILURE(rc))
            return rc;

#ifdef VBOX_WITH_RAW_MODE
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif

        /* Prefetch pages for EIP and ESP. */
        /** @todo This is rather expensive. Should investigate if it really helps at all. */
        /** @todo this should be skipped! */
        CPUM_IMPORT_EXTRN_RET(pVCpu, CPUMCTX_EXTRN_CS | CPUMCTX_EXTRN_SS);
        rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pVCpu->cpum.GstCtx.rip));
        if (rc == VINF_SUCCESS)
            rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_SS, CPUMCTX2CORE(&pVCpu->cpum.GstCtx), pVCpu->cpum.GstCtx.rsp));
        if (rc != VINF_SUCCESS)
        {
            if (rc != VINF_PGM_SYNC_CR3)
            {
                AssertLogRelMsgReturn(RT_FAILURE(rc), ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
                return rc;
            }
            rc = PGMSyncCR3(pVCpu, pVCpu->cpum.GstCtx.cr0, pVCpu->cpum.GstCtx.cr3, pVCpu->cpum.GstCtx.cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
            if (RT_FAILURE(rc))
                return rc;
        }
        /** @todo maybe prefetch the supervisor stack page as well */
#ifdef VBOX_WITH_RAW_MODE
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif
    }

    /*
     * Allocate handy pages (just in case the above actions have consumed some pages).
     */
    if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
    {
        int rc = PGMR3PhysAllocateHandyPages(pVM);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Check whether we're out of memory now.
     *
     * This may stem from some of the above actions or operations that have been
     * executed since we ran FFs.  The handy page allocation, for instance, must
     * always be followed by this check.
     */
    if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
        return VINF_EM_NO_MEMORY;

    return VINF_SUCCESS;
}


/**
 * Executes hardware accelerated guest code (Intel VT-x & AMD-V).
 *
 * This function contains the hardware-virtualization (HM) version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
 *          VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 */
int emR3HmExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
{
    int rc = VERR_IPE_UNINITIALIZED_STATUS;

    LogFlow(("emR3HmExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));
    *pfFFDone = false;

    STAM_COUNTER_INC(&pVCpu->em.s.StatHMExecuteCalled);

    /*
     * Spin till we get a forced action which returns anything but VINF_SUCCESS.
     */
    for (;;)
    {
        STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHMEntry, a);

        /* Check if a forced reschedule is pending. */
        if (HMR3IsRescheduleRequired(pVM, &pVCpu->cpum.GstCtx))
        {
            rc = VINF_EM_RESCHEDULE;
            break;
        }

        /*
         * Process high priority pre-execution raw-mode FFs.
         */
#ifdef VBOX_WITH_RAW_MODE
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            rc = emR3HmForcedActions(pVM, pVCpu);
            if (rc != VINF_SUCCESS)
                break;
        }

#ifdef LOG_ENABLED
        /*
         * Log important stuff before entering GC.
         */
        if (TRPMHasTrap(pVCpu))
            Log(("CPU%d: Pending hardware interrupt=0x%x cs:rip=%04X:%RGv\n", pVCpu->idCpu, TRPMGetTrapNo(pVCpu), pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip));

        uint32_t cpl = CPUMGetGuestCPL(pVCpu);
        if (pVM->cCpus == 1)
        {
            if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
                Log(("HWV86: %08X IF=%d\n", pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
                Log(("HWR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, (uint32_t)pVCpu->cpum.GstCtx.cr0, (uint32_t)pVCpu->cpum.GstCtx.cr4, (uint32_t)pVCpu->cpum.GstCtx.msrEFER));
            else
                Log(("HWR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, (uint32_t)pVCpu->cpum.GstCtx.cr0, (uint32_t)pVCpu->cpum.GstCtx.cr4, (uint32_t)pVCpu->cpum.GstCtx.msrEFER));
        }
        else
        {
            if (pVCpu->cpum.GstCtx.eflags.Bits.u1VM)
                Log(("HWV86-CPU%d: %08X IF=%d\n", pVCpu->idCpu, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(&pVCpu->cpum.GstCtx))
                Log(("HWR%d-CPU%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, (RTGCPTR)pVCpu->cpum.GstCtx.rip, pVCpu->cpum.GstCtx.rsp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, (uint32_t)pVCpu->cpum.GstCtx.cr0, (uint32_t)pVCpu->cpum.GstCtx.cr4, (uint32_t)pVCpu->cpum.GstCtx.msrEFER));
            else
                Log(("HWR%d-CPU%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pVCpu->cpum.GstCtx.cs.Sel, pVCpu->cpum.GstCtx.eip, pVCpu->cpum.GstCtx.esp, pVCpu->cpum.GstCtx.eflags.Bits.u1IF, pVCpu->cpum.GstCtx.eflags.Bits.u2IOPL, (uint32_t)pVCpu->cpum.GstCtx.cr0, (uint32_t)pVCpu->cpum.GstCtx.cr4, (uint32_t)pVCpu->cpum.GstCtx.msrEFER));
        }
#endif /* LOG_ENABLED */

        /*
         * Execute the code.
         */
        STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHMEntry, a);

        if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
        {
            STAM_PROFILE_START(&pVCpu->em.s.StatHMExec, x);
            rc = VMMR3HmRunGC(pVM, pVCpu);
            STAM_PROFILE_STOP(&pVCpu->em.s.StatHMExec, x);
        }
        else
        {
            /* Give up this time slice; virtual time continues */
            STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
            RTThreadSleep(5);
            STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
            rc = VINF_SUCCESS;
        }


        /*
         * Deal with high priority post execution FFs before doing anything else.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
            rc = VBOXSTRICTRC_TODO(emR3HighPriorityPostForcedActions(pVM, pVCpu, rc));

        /*
         * Process the returned status code.
         */
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
            break;

        rc = emR3HmHandleRC(pVM, pVCpu, rc);
        if (rc != VINF_SUCCESS)
            break;

        /*
         * Check and execute forced actions.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPollVoid(pVM, pVCpu);
#endif
        if (   VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_MASK))
        {
            rc = emR3ForcedActions(pVM, pVCpu, rc);
            VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
            if (   rc != VINF_SUCCESS
                && rc != VINF_EM_RESCHEDULE_HM)
            {
                *pfFFDone = true;
                break;
            }
        }
    }

    /*
     * Return to outer loop.
     */
#if defined(LOG_ENABLED) && defined(DEBUG)
    RTLogFlush(NULL);
#endif
    return rc;
}
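/*
 * Rough sketch of how the outer loop is expected to drive emR3HmExecute()
 * (illustrative only; the actual dispatcher is EMR3ExecuteVM() in EM.cpp, and
 * the precise handling of the returned status lives there):
 *
 *     bool fFFDone = false;
 *     rc = emR3HmExecute(pVM, pVCpu, &fFFDone);
 *     // rc feeds the scheduling decision (e.g. VINF_EM_RESCHEDULE*); when
 *     // fFFDone is true, forced actions were already serviced this round.
 */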