VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR3/EMHM.cpp@ 55705

Last change on this file since 55705 was 47807, checked in by vboxsync, 11 years ago

EM/HM: Don't switch to REM immediately, try up to 1024 instructions in IEM first - only HM exec mode. This covers most trips to REM when booting xppro without nested paging.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 21.4 KB
Line 
1/* $Id: EMHM.cpp 47807 2013-08-16 12:54:26Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager - hardware virtualization
4 */
5
6/*
7 * Copyright (C) 2006-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_EM
22#include <VBox/vmm/em.h>
23#include <VBox/vmm/vmm.h>
24#include <VBox/vmm/csam.h>
25#include <VBox/vmm/selm.h>
26#include <VBox/vmm/trpm.h>
27#include <VBox/vmm/iem.h>
28#include <VBox/vmm/iom.h>
29#include <VBox/vmm/dbgf.h>
30#include <VBox/vmm/pgm.h>
31#ifdef VBOX_WITH_REM
32# include <VBox/vmm/rem.h>
33#endif
34#include <VBox/vmm/tm.h>
35#include <VBox/vmm/mm.h>
36#include <VBox/vmm/ssm.h>
37#include <VBox/vmm/pdmapi.h>
38#include <VBox/vmm/pdmcritsect.h>
39#include <VBox/vmm/pdmqueue.h>
40#include <VBox/vmm/hm.h>
41#include "EMInternal.h"
42#include <VBox/vmm/vm.h>
43#include <VBox/vmm/cpumdis.h>
44#include <VBox/dis.h>
45#include <VBox/disopcode.h>
46#include <VBox/vmm/dbgf.h>
47#include "VMMTracing.h"
48
49#include <iprt/asm.h>
50
51
52/*******************************************************************************
53* Defined Constants And Macros *
54*******************************************************************************/
55#if 0 /* Disabled till after 2.1.0 when we've time to test it. */
56#define EM_NOTIFY_HM
57#endif
58
59
60/*******************************************************************************
61* Internal Functions *
62*******************************************************************************/
63DECLINLINE(int) emR3HmExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
64static int emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu);
65static int emR3HmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
66
67#define EMHANDLERC_WITH_HM
68#define emR3ExecuteInstruction emR3HmExecuteInstruction
69#define emR3ExecuteIOInstruction emR3HmExecuteIOInstruction
70#include "EMHandleRCTmpl.h"
71
72
/**
 * Executes a single instruction in HM mode if we can.
 *
 * This is somewhat comparable to REMR3EmulateInstruction.
 *
 * @returns VBox strict status code.
 * @retval  VINF_EM_DBG_STEPPED when an instruction was executed and RIP
 *          changed (or, with EM_ONE_INS_FLAGS_RIP_CHANGE clear, after one
 *          successful execution round that moved RIP).
 * @retval  VINF_EM_RESCHEDULE when HM cannot execute the current guest state
 *          at all; the caller should pick another execution engine.
 *          (NOTE(review): the doxygen previously claimed
 *          VERR_EM_CANNOT_EXEC_GUEST, which this code never returns.)
 *          Other statuses from the forced-action handlers and
 *          emR3HmHandleRC are passed straight up.
 *
 * @param   pVM     Pointer to the cross context VM structure.
 * @param   pVCpu   Pointer to the cross context CPU structure for
 *                  the calling EMT.
 * @param   fFlags  Combinations of EM_ONE_INS_FLAGS_XXX.
 * @thread  EMT.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) EMR3HmSingleInstruction(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
{
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    Assert(!(fFlags & ~EM_ONE_INS_FLAGS_MASK));

    if (!HMR3CanExecuteGuest(pVM, pCtx))
        return VINF_EM_RESCHEDULE;

    /* Remember the starting RIP so we can tell whether an instruction
       actually got executed. */
    uint64_t const uOldRip = pCtx->rip;
    for (;;)
    {
        /*
         * Service necessary FFs before going into HM.
         */
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            VBOXSTRICTRC rcStrict = emR3HmForcedActions(pVM, pVCpu, pCtx);
            if (rcStrict != VINF_SUCCESS)
            {
                Log(("EMR3HmSingleInstruction: FFs before -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                return rcStrict;
            }
        }

        /*
         * Go execute it, with the single-instruction flag temporarily set
         * (restored to its previous value right after the run).
         */
        bool fOld = HMSetSingleInstruction(pVCpu, true);
        VBOXSTRICTRC rcStrict = VMMR3HmRunGC(pVM, pVCpu);
        HMSetSingleInstruction(pVCpu, fOld);
        LogFlow(("EMR3HmSingleInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));

        /*
         * Handle high priority FFs and informational status codes.  We don't do
         * normal FF processing the caller or the next call can deal with them.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
        {
            rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
            LogFlow(("EMR3HmSingleInstruction: FFs after -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
        }

        /* Statuses outside the VINF_EM_FIRST..VINF_EM_LAST range are dealt
           with here; EM scheduling statuses are returned to the caller. */
        if (rcStrict != VINF_SUCCESS && (rcStrict < VINF_EM_FIRST || rcStrict > VINF_EM_LAST))
        {
            rcStrict = emR3HmHandleRC(pVM, pVCpu, pCtx, VBOXSTRICTRC_TODO(rcStrict));
            Log(("EMR3HmSingleInstruction: emR3HmHandleRC -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
        }

        /*
         * Done?  We loop again only when everything succeeded, the caller
         * asked for an observable RIP change, and RIP has not moved yet.
         */
        if (   (rcStrict != VINF_SUCCESS && rcStrict != VINF_EM_DBG_STEPPED)
            || !(fFlags & EM_ONE_INS_FLAGS_RIP_CHANGE)
            || pCtx->rip != uOldRip)
        {
            if (rcStrict == VINF_SUCCESS && pCtx->rip != uOldRip)
                rcStrict = VINF_EM_DBG_STEPPED;
            Log(("EMR3HmSingleInstruction: returns %Rrc (rip %llx -> %llx)\n", VBOXSTRICTRC_VAL(rcStrict), uOldRip, pCtx->rip));
            return rcStrict;
        }
    }
}
154
155
/**
 * Executes one (or perhaps a few more) instruction(s).
 *
 * Tries IEM first (when compiled in) and falls back on the recompiler (REM)
 * when IEM reports the instruction or some aspect of it as not implemented.
 *
 * @returns VBox status code suitable for EM.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   rcRC        Return code from RC.  Currently unused (NOREF'ed).
 * @param   pszPrefix   Disassembly prefix.  If not NULL we'll disassemble the
 *                      instruction and prefix the log output with this text.
 *                      (Parameter only present in LOG_ENABLED builds.)
 */
#ifdef LOG_ENABLED
static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC, const char *pszPrefix)
#else
static int emR3HmExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcRC)
#endif
{
#ifdef LOG_ENABLED
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
#endif
    int rc;
    NOREF(rcRC);

#ifdef LOG_ENABLED
    /*
     * Log it.
     */
    Log(("EMINS: %04x:%RGv RSP=%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
    if (pszPrefix)
    {
        DBGFR3_INFO_LOG(pVM, "cpumguest", pszPrefix);
        DBGFR3_DISAS_INSTR_CUR_LOG(pVCpu, pszPrefix);
    }
#endif

    /*
     * Use IEM and fallback on REM if the functionality is missing.
     * Once IEM gets mature enough, nothing should ever fall back.
     *
     * Note! If the IEM attempt below is compiled out (REM-only builds without
     *       VBOX_WITH_FIRST_IEM_STEP), the braced block that follows executes
     *       unconditionally; rc is always assigned on every compiled path.
     */
#if defined(VBOX_WITH_FIRST_IEM_STEP) || !defined(VBOX_WITH_REM)
    STAM_PROFILE_START(&pVCpu->em.s.StatIEMEmu, a);
    rc = VBOXSTRICTRC_TODO(IEMExecOne(pVCpu));
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIEMEmu, a);

    if (   rc == VERR_IEM_ASPECT_NOT_IMPLEMENTED
        || rc == VERR_IEM_INSTR_NOT_IMPLEMENTED)
#endif
    {
#ifdef VBOX_WITH_REM
        STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, b);
# ifndef VBOX_WITH_FIRST_IEM_STEP
        Log(("EMINS[rem]: %04x:%RGv RSP=%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
//# elif defined(DEBUG_bird)
//        AssertFailed();
# endif
        EMRemLock(pVM);
        /* Flush the recompiler TLB if the VCPU has changed. */
        if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
            CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
        pVM->em.s.idLastRemCpu = pVCpu->idCpu;

        rc = REMR3EmulateInstruction(pVM, pVCpu);
        EMRemUnlock(pVM);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, b);
#else  /* !VBOX_WITH_REM */
        /* No REM fallback available; the IEM status (possibly a not-implemented
           error) is returned as-is. */
        NOREF(pVM);
#endif /* !VBOX_WITH_REM */
    }

#ifdef EM_NOTIFY_HM
    if (pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_HM)
        HMR3NotifyEmulated(pVCpu);
#endif
    return rc;
}
231
232
233/**
234 * Executes one (or perhaps a few more) instruction(s).
235 * This is just a wrapper for discarding pszPrefix in non-logging builds.
236 *
237 * @returns VBox status code suitable for EM.
238 * @param pVM Pointer to the VM.
239 * @param pVCpu Pointer to the VMCPU.
240 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
241 * instruction and prefix the log output with this text.
242 * @param rcGC GC return code
243 */
244DECLINLINE(int) emR3HmExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
245{
246#ifdef LOG_ENABLED
247 return emR3HmExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
248#else
249 return emR3HmExecuteInstructionWorker(pVM, pVCpu, rcGC);
250#endif
251}
252
/**
 * Executes one (or perhaps a few more) IO instruction(s).
 *
 * First tries to restart an I/O instruction that ring-0 refused to handle.
 * With VBOX_WITH_FIRST_IEM_STEP the instruction is then simply handed to IEM;
 * in older builds the simple IN/OUT/INS/OUTS forms are interpreted via IOM
 * and everything else falls back on full single-instruction emulation.
 *
 * @returns VBox status code suitable for EM.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 */
static int emR3HmExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
{
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;

    STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);

    /*
     * Try to restart the io instruction that was refused in ring-0.
     */
    VBOXSTRICTRC rcStrict = HMR3RestartPendingIOInstr(pVM, pVCpu, pCtx);
    if (IOM_SUCCESS(rcStrict))
    {
        STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoRestarted);
        STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
        return VBOXSTRICTRC_TODO(rcStrict); /* rip already updated. */
    }
    /* VERR_NOT_FOUND just means there was no pending I/O instruction to
       restart; any other status here is unexpected. */
    AssertMsgReturn(rcStrict == VERR_NOT_FOUND, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)),
                    RT_SUCCESS_NP(rcStrict) ? VERR_IPE_UNEXPECTED_INFO_STATUS : VBOXSTRICTRC_TODO(rcStrict));

#ifdef VBOX_WITH_FIRST_IEM_STEP
    /*
     * Hand it over to the interpreter.
     */
    rcStrict = IEMExecOne(pVCpu);
    LogFlow(("emR3HmExecuteIOInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIoIem);
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
    return VBOXSTRICTRC_TODO(rcStrict);

#else
    /** @todo probably we should fall back to the recompiler; otherwise we'll go back and forth between HC & GC
     * as io instructions tend to come in packages of more than one
     */
    DISCPUSTATE Cpu;
    int rc2 = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "IO EMU");
    if (RT_SUCCESS(rc2))
    {
        /* Default to full emulation unless one of the IOM interpreters below
           handles the instruction. */
        rcStrict = VINF_EM_RAW_EMULATE_INSTR;

        if (!(Cpu.fPrefix & (DISPREFIX_REP | DISPREFIX_REPNE)))
        {
            switch (Cpu.pCurInstr->uOpcode)
            {
                case OP_IN:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
                    rcStrict = IOMInterpretIN(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }

                case OP_OUT:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
                    rcStrict = IOMInterpretOUT(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }
            }
        }
        else if (Cpu.fPrefix & DISPREFIX_REP)
        {
            switch (Cpu.pCurInstr->uOpcode)
            {
                case OP_INSB:
                case OP_INSWD:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
                    rcStrict = IOMInterpretINS(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }

                case OP_OUTSB:
                case OP_OUTSWD:
                {
                    STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
                    rcStrict = IOMInterpretOUTS(pVM, pVCpu, CPUMCTX2CORE(pCtx), &Cpu);
                    break;
                }
            }
        }

        /*
         * Handled the I/O return codes.
         * (The unhandled cases end up with rcStrict == VINF_EM_RAW_EMULATE_INSTR.)
         */
        if (IOM_SUCCESS(rcStrict))
        {
            /* IOM does not advance RIP for us, so step past the instruction. */
            pCtx->rip += Cpu.cbInstr;
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            LogFlow(("emR3HmExecuteIOInstruction: %Rrc 1\n", VBOXSTRICTRC_VAL(rcStrict)));
            return VBOXSTRICTRC_TODO(rcStrict);
        }

        if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
        {
            /* The active trap will be dispatched. */
            Assert(TRPMHasTrap(pVCpu));
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            LogFlow(("emR3HmExecuteIOInstruction: VINF_SUCCESS 2\n"));
            return VINF_SUCCESS;
        }
        AssertMsg(rcStrict != VINF_TRPM_XCPT_DISPATCHED, ("Handle VINF_TRPM_XCPT_DISPATCHED\n"));

        if (RT_FAILURE(rcStrict))
        {
            STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
            LogFlow(("emR3HmExecuteIOInstruction: %Rrc 3\n", VBOXSTRICTRC_VAL(rcStrict)));
            return VBOXSTRICTRC_TODO(rcStrict);
        }
        AssertMsg(rcStrict == VINF_EM_RAW_EMULATE_INSTR || rcStrict == VINF_EM_RESCHEDULE_REM, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
    }

    /* Disassembly failed or the instruction was not handled above: fall back
       on full single-instruction emulation. */
    STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
    int rc3 = emR3HmExecuteInstruction(pVM, pVCpu, "IO: ");
    LogFlow(("emR3HmExecuteIOInstruction: %Rrc 4 (rc2=%Rrc, rc3=%Rrc)\n", VBOXSTRICTRC_VAL(rcStrict), rc2, rc3));
    return rc3;
#endif
}
377
378
/**
 * Process raw-mode specific forced actions.
 *
 * This function is called when any FFs in the VM_FF_HIGH_PRIORITY_PRE_RAW_MASK is pending.
 *
 * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
 *          EM statuses.
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pCtx    Pointer to the guest CPU context.
 */
static int emR3HmForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /*
     * Sync page directory.
     */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
    {
        Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
        int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
        if (RT_FAILURE(rc))
            return rc;

#ifdef VBOX_WITH_RAW_MODE
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif

        /* Prefetch pages for EIP and ESP. */
        /** @todo This is rather expensive. Should investigate if it really helps at all. */
        rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_CS, CPUMCTX2CORE(pCtx), pCtx->rip));
        if (rc == VINF_SUCCESS)
            rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->rsp));
        if (rc != VINF_SUCCESS)
        {
            if (rc != VINF_PGM_SYNC_CR3)
            {
                /* Only failure statuses may be passed up from the prefetch;
                   any other informational status is unexpected here. */
                AssertLogRelMsgReturn(RT_FAILURE(rc), ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
                return rc;
            }
            /* The prefetch requested another CR3 sync; redo it before
               continuing. */
            rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
            if (RT_FAILURE(rc))
                return rc;
        }
        /** @todo maybe prefetch the supervisor stack page as well */
#ifdef VBOX_WITH_RAW_MODE
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif
    }

    /*
     * Allocate handy pages (just in case the above actions have consumed some pages).
     */
    if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
    {
        int rc = PGMR3PhysAllocateHandyPages(pVM);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Check whether we're out of memory now.
     *
     * This may stem from some of the above actions or operations that has been executed
     * since we ran FFs. The allocate handy pages must for instance always be followed by
     * this check.
     */
    if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY))
        return VINF_EM_NO_MEMORY;

    return VINF_SUCCESS;
}
450
451
/**
 * Executes hardware accelerated raw code. (Intel VT-x & AMD-V)
 *
 * This function contains the raw-mode version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
 *          VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 */
int emR3HmExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
{
    int      rc   = VERR_IPE_UNINITIALIZED_STATUS;
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;

    LogFlow(("emR3HmExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
    *pfFFDone = false;

    STAM_COUNTER_INC(&pVCpu->em.s.StatHmExecuteEntry);

#ifdef EM_NOTIFY_HM
    HMR3NotifyScheduled(pVCpu);
#endif

    /*
     * Spin till we get a forced action which returns anything but VINF_SUCCESS.
     */
    for (;;)
    {
        STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHmEntry, a);

        /* Check if a forced reschedule is pending. */
        if (HMR3IsRescheduleRequired(pVM, pCtx))
        {
            rc = VINF_EM_RESCHEDULE;
            break;
        }

        /*
         * Process high priority pre-execution raw-mode FFs.
         */
#ifdef VBOX_WITH_RAW_MODE
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            rc = emR3HmForcedActions(pVM, pVCpu, pCtx);
            if (rc != VINF_SUCCESS)
                break;
        }

#ifdef LOG_ENABLED
        /*
         * Log important stuff before entering GC.
         */
        if (TRPMHasTrap(pVCpu))
            Log(("CPU%d: Pending hardware interrupt=0x%x cs:rip=%04X:%RGv\n", pVCpu->idCpu, TRPMGetTrapNo(pVCpu), pCtx->cs.Sel, (RTGCPTR)pCtx->rip));

        uint32_t cpl = CPUMGetGuestCPL(pVCpu);

        /* Separate single-CPU and SMP log lines so the VCPU id is only shown
           when there is more than one CPU. */
        if (pVM->cCpus == 1)
        {
            if (pCtx->eflags.Bits.u1VM)
                Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(pCtx))
                Log(("HWR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
            else
                Log(("HWR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
        }
        else
        {
            if (pCtx->eflags.Bits.u1VM)
                Log(("HWV86-CPU%d: %08X IF=%d\n", pVCpu->idCpu, pCtx->eip, pCtx->eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(pCtx))
                Log(("HWR%d-CPU%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
            else
                Log(("HWR%d-CPU%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n", cpl, pVCpu->idCpu, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL, (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
        }
#endif /* LOG_ENABLED */

        /*
         * Execute the code.
         */
        STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHmEntry, a);

        if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
        {
            STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x);
            rc = VMMR3HmRunGC(pVM, pVCpu);
            STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x);
        }
        else
        {
            /* Give up this time slice; virtual time continues */
            STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
            RTThreadSleep(5);
            STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
            rc = VINF_SUCCESS;
        }


        /*
         * Deal with high priority post execution FFs before doing anything else.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);

        /*
         * Process the returned status code.
         * EM scheduling statuses break straight out to the outer loop; the
         * rest goes through the common HM status code handler first.
         */
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
            break;

        rc = emR3HmHandleRC(pVM, pVCpu, pCtx, rc);
        if (rc != VINF_SUCCESS)
            break;

        /*
         * Check and execute forced actions.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPollVoid(pVM, pVCpu);
#endif
        if (   VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_MASK))
        {
            rc = emR3ForcedActions(pVM, pVCpu, rc);
            VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
            if (   rc != VINF_SUCCESS
                && rc != VINF_EM_RESCHEDULE_HM)
            {
                /* FFs have already been serviced for this round; tell the
                   outer loop so it doesn't redo them. */
                *pfFFDone = true;
                break;
            }
        }
    }

    /*
     * Return to outer loop.
     */
#if defined(LOG_ENABLED) && defined(DEBUG)
    RTLogFlush(NULL);
#endif
    return rc;
}
604
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette