VirtualBox

source: vbox/trunk/src/VBox/VMM/EMRaw.cpp@ 29293

Last change on this file since 29293 was 29250, checked in by vboxsync, 15 years ago

iprt/asm*.h: split out asm-math.h, don't include asm-*.h from asm.h, don't include asm.h from sup.h. Fixed a couple file headers.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 58.5 KB
1/* $Id: EMRaw.cpp 29250 2010-05-09 17:53:58Z vboxsync $ */
2/** @file
3 * EM - Execution Monitor / Manager - software virtualization
4 */
5
6/*
7 * Copyright (C) 2006-2009 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/** @page pg_em EM - The Execution Monitor / Manager
19 *
20 * The Execution Monitor/Manager is responsible for running the VM, scheduling
21 * the right kind of execution (Raw-mode, Hardware Assisted, Recompiled or
22 * Interpreted), and keeping the CPU states in sync. The function
23 * EMR3ExecuteVM() is the 'main-loop' of the VM, while each of the execution
24 * modes has different inner loops (emR3RawExecute, emR3HwAccExecute, and
25 * emR3RemExecute).
26 *
27 * The interpreted execution is only used to avoid switching between
28 * raw-mode/hwaccm and the recompiler when fielding virtualization traps/faults.
29 * The interpretation is thus implemented as part of EM.
30 *
31 * @see grp_em
32 */
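/* Illustrative sketch only, not part of EMRaw.cpp: assuming the EM states and
 * inner-loop helpers named in the comment above, the EMR3ExecuteVM() 'main-loop'
 * dispatches roughly like this (the real loop also handles halted, suspended
 * and debug states plus forced actions):
 *
 *     for (;;)
 *     {
 *         switch (pVCpu->em.s.enmState)
 *         {
 *             case EMSTATE_RAW:   rc = emR3RawExecute(pVM, pVCpu, &fFFDone);   break;
 *             case EMSTATE_HWACC: rc = emR3HwAccExecute(pVM, pVCpu, &fFFDone); break;
 *             case EMSTATE_REM:   rc = emR3RemExecute(pVM, pVCpu, &fFFDone);   break;
 *             default:            // ... other states ...
 *                 break;
 *         }
 *         // ... process forced actions and reschedule based on rc ...
 *     }
 */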
33
34/*******************************************************************************
35* Header Files *
36*******************************************************************************/
37#define LOG_GROUP LOG_GROUP_EM
38#include <VBox/em.h>
39#include <VBox/vmm.h>
40#ifdef VBOX_WITH_VMI
41# include <VBox/parav.h>
42#endif
43#include <VBox/patm.h>
44#include <VBox/csam.h>
45#include <VBox/selm.h>
46#include <VBox/trpm.h>
47#include <VBox/iom.h>
48#include <VBox/dbgf.h>
49#include <VBox/pgm.h>
50#include <VBox/rem.h>
51#include <VBox/tm.h>
52#include <VBox/mm.h>
53#include <VBox/ssm.h>
54#include <VBox/pdmapi.h>
55#include <VBox/pdmcritsect.h>
56#include <VBox/pdmqueue.h>
57#include <VBox/patm.h>
58#include "EMInternal.h"
59#include <VBox/vm.h>
60#include <VBox/cpumdis.h>
61#include <VBox/dis.h>
62#include <VBox/disopcode.h>
63#include <VBox/dbgf.h>
64
65#include <VBox/log.h>
66#include <iprt/asm.h>
67#include <iprt/string.h>
68#include <iprt/stream.h>
69
70
71/*******************************************************************************
72* Defined Constants And Macros *
73*******************************************************************************/
74
75
76/*******************************************************************************
77* Internal Functions *
78*******************************************************************************/
79static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
80DECLINLINE(int) emR3ExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC = VINF_SUCCESS);
81static int emR3RawGuestTrap(PVM pVM, PVMCPU pVCpu);
82static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret);
83static int emR3SingleStepExecRem(PVM pVM, uint32_t cIterations);
84static int emR3RawPrivileged(PVM pVM, PVMCPU pVCpu);
85static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu);
86static int emR3RawRingSwitch(PVM pVM, PVMCPU pVCpu);
87
88#define EMHANDLERC_WITH_PATM
89#include "EMHandleRCTmpl.h"
90
91/**
92 * Enables or disables a set of raw-mode execution modes.
93 *
94 * @returns VINF_SUCCESS on success.
95 * @returns VINF_EM_RESCHEDULE if a rescheduling might be required.
96 * @returns VERR_INVALID_PARAMETER on an invalid enmMode value.
97 *
98 * @param pVM The VM to operate on.
99 * @param enmMode The execution mode change.
100 * @thread The emulation thread.
101 */
102VMMR3DECL(int) EMR3RawSetMode(PVM pVM, EMRAWMODE enmMode)
103{
104 switch (enmMode)
105 {
106 case EMRAW_NONE:
107 pVM->fRawR3Enabled = false;
108 pVM->fRawR0Enabled = false;
109 break;
110 case EMRAW_RING3_ENABLE:
111 pVM->fRawR3Enabled = true;
112 break;
113 case EMRAW_RING3_DISABLE:
114 pVM->fRawR3Enabled = false;
115 break;
116 case EMRAW_RING0_ENABLE:
117 pVM->fRawR0Enabled = true;
118 break;
119 case EMRAW_RING0_DISABLE:
120 pVM->fRawR0Enabled = false;
121 break;
122 default:
123 AssertMsgFailed(("Invalid enmMode=%d\n", enmMode));
124 return VERR_INVALID_PARAMETER;
125 }
126 Log(("EMR3SetRawMode: fRawR3Enabled=%RTbool fRawR0Enabled=%RTbool\n",
127 pVM->fRawR3Enabled, pVM->fRawR0Enabled));
128 return pVM->aCpus[0].em.s.enmState == EMSTATE_RAW ? VINF_EM_RESCHEDULE : VINF_SUCCESS;
129}
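/* Illustrative usage sketch (not from this file; the caller and its context
 * are assumptions): code running on the EMT could disable raw ring-0
 * execution and let the returned status drive a reschedule:
 *
 *     int rc = EMR3RawSetMode(pVM, EMRAW_RING0_DISABLE);
 *     if (rc == VINF_EM_RESCHEDULE)
 *         // hand the status back to EMR3ExecuteVM so it reselects the execution mode
 *         ;
 *     else
 *         AssertRC(rc);
 */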
130
131
132
133#ifdef VBOX_WITH_STATISTICS
134/**
135 * Just a braindead function to keep track of cli addresses.
136 * @param pVM VM handle.
137 * @param pVCpu VMCPU handle.
138 * @param GCPtrInstr The EIP of the cli instruction.
139 */
140static void emR3RecordCli(PVM pVM, PVMCPU pVCpu, RTGCPTR GCPtrInstr)
141{
142 PCLISTAT pRec;
143
144 pRec = (PCLISTAT)RTAvlGCPtrGet(&pVCpu->em.s.pCliStatTree, GCPtrInstr);
145 if (!pRec)
146 {
147 /* New cli instruction; insert into the tree. */
148 pRec = (PCLISTAT)MMR3HeapAllocZ(pVM, MM_TAG_EM, sizeof(*pRec));
149 Assert(pRec);
150 if (!pRec)
151 return;
152 pRec->Core.Key = GCPtrInstr;
153
154 char szCliStatName[32];
155 RTStrPrintf(szCliStatName, sizeof(szCliStatName), "/EM/Cli/0x%RGv", GCPtrInstr);
156 STAM_REG(pVM, &pRec->Counter, STAMTYPE_COUNTER, szCliStatName, STAMUNIT_OCCURENCES, "Number of times cli was executed.");
157
158 bool fRc = RTAvlGCPtrInsert(&pVCpu->em.s.pCliStatTree, &pRec->Core);
159 Assert(fRc); NOREF(fRc);
160 }
161 STAM_COUNTER_INC(&pRec->Counter);
162 STAM_COUNTER_INC(&pVCpu->em.s.StatTotalClis);
163}
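/* For reference (illustrative, assumed guest address): a cli executed at
 * 0xc0102abc would be registered under the STAM path "/EM/Cli/0xc0102abc"
 * with its own occurrence counter, in addition to the aggregate counter
 * in StatTotalClis. */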
164#endif /* VBOX_WITH_STATISTICS */
165
166
167
168/**
169 * Resumes executing hypervisor after a debug event.
170 *
171 * This is kind of special since our current guest state is
172 * potentially out of sync.
173 *
174 * @returns VBox status code.
175 * @param pVM The VM handle.
176 * @param pVCpu The VMCPU handle.
177 */
178int emR3RawResumeHyper(PVM pVM, PVMCPU pVCpu)
179{
180 int rc;
181 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
182 Assert(pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER);
183 Log(("emR3RawResumeHyper: cs:eip=%RTsel:%RGr efl=%RGr\n", pCtx->cs, pCtx->eip, pCtx->eflags));
184
185 /*
186 * Resume execution.
187 */
188 CPUMRawEnter(pVCpu, NULL);
189 CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_RF);
190 rc = VMMR3ResumeHyper(pVM, pVCpu);
191 Log(("emR3RawResumeHyper: cs:eip=%RTsel:%RGr efl=%RGr - returned from GC with rc=%Rrc\n", pCtx->cs, pCtx->eip, pCtx->eflags, rc));
192 rc = CPUMRawLeave(pVCpu, NULL, rc);
193 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
194
195 /*
196 * Deal with the return code.
197 */
198 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
199 rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
200 rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
201 return rc;
202}
203
204
205/**
206 * Steps rawmode.
207 *
208 * @returns VBox status code.
209 * @param pVM The VM handle.
210 * @param pVCpu The VMCPU handle.
211 */
212int emR3RawStep(PVM pVM, PVMCPU pVCpu)
213{
214 Assert( pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER
215 || pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_RAW
216 || pVCpu->em.s.enmState == EMSTATE_DEBUG_GUEST_REM);
217 int rc;
218 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
219 bool fGuest = pVCpu->em.s.enmState != EMSTATE_DEBUG_HYPER;
220#ifndef DEBUG_sandervl
221 Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr\n", fGuest ? CPUMGetGuestCS(pVCpu) : CPUMGetHyperCS(pVCpu),
222 fGuest ? CPUMGetGuestEIP(pVCpu) : CPUMGetHyperEIP(pVCpu), fGuest ? CPUMGetGuestEFlags(pVCpu) : CPUMGetHyperEFlags(pVCpu)));
223#endif
224 if (fGuest)
225 {
226 /*
227 * Check vital forced actions, but ignore pending interrupts and timers.
228 */
229 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
230 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
231 {
232 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
233 if (rc != VINF_SUCCESS)
234 return rc;
235 }
236
237 /*
238 * Set flags for single stepping.
239 */
240 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);
241 }
242 else
243 CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) | X86_EFL_TF | X86_EFL_RF);
244
245 /*
246 * Single step.
247 * We do not start time or anything, if anything we should just do a few nanoseconds.
248 */
249 CPUMRawEnter(pVCpu, NULL);
250 do
251 {
252 if (pVCpu->em.s.enmState == EMSTATE_DEBUG_HYPER)
253 rc = VMMR3ResumeHyper(pVM, pVCpu);
254 else
255 rc = VMMR3RawRunGC(pVM, pVCpu);
256#ifndef DEBUG_sandervl
257 Log(("emR3RawStep: cs:eip=%RTsel:%RGr efl=%RGr - GC rc %Rrc\n", fGuest ? CPUMGetGuestCS(pVCpu) : CPUMGetHyperCS(pVCpu),
258 fGuest ? CPUMGetGuestEIP(pVCpu) : CPUMGetHyperEIP(pVCpu), fGuest ? CPUMGetGuestEFlags(pVCpu) : CPUMGetHyperEFlags(pVCpu), rc));
259#endif
260 } while ( rc == VINF_SUCCESS
261 || rc == VINF_EM_RAW_INTERRUPT);
262 rc = CPUMRawLeave(pVCpu, NULL, rc);
263 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
264
265 /*
266 * Make sure the trap flag is cleared.
267 * (Too bad if the guest is trying to single step too.)
268 */
269 if (fGuest)
270 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
271 else
272 CPUMSetHyperEFlags(pVCpu, CPUMGetHyperEFlags(pVCpu) & ~X86_EFL_TF);
273
274 /*
275 * Deal with the return codes.
276 */
277 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
278 rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
279 rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
280 return rc;
281}
282
283
284#ifdef DEBUG
285
286
287int emR3SingleStepExecRaw(PVM pVM, PVMCPU pVCpu, uint32_t cIterations)
288{
289 int rc = VINF_SUCCESS;
290 EMSTATE enmOldState = pVCpu->em.s.enmState;
291 pVCpu->em.s.enmState = EMSTATE_DEBUG_GUEST_RAW;
292
293 Log(("Single step BEGIN:\n"));
294 for (uint32_t i = 0; i < cIterations; i++)
295 {
296 DBGFR3PrgStep(pVCpu);
297 DBGFR3DisasInstrCurrentLog(pVCpu, "RSS: ");
298 rc = emR3RawStep(pVM, pVCpu);
299 if (rc != VINF_SUCCESS)
300 break;
301 }
302 Log(("Single step END: rc=%Rrc\n", rc));
303 CPUMSetGuestEFlags(pVCpu, CPUMGetGuestEFlags(pVCpu) & ~X86_EFL_TF);
304 pVCpu->em.s.enmState = enmOldState;
305 return rc;
306}
307
308#endif /* DEBUG */
309
310
311/**
312 * Executes one (or perhaps a few more) instruction(s).
313 *
314 * @returns VBox status code suitable for EM.
315 *
316 * @param pVM VM handle.
317 * @param pVCpu VMCPU handle
318 * @param rcGC GC return code
319 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
320 * instruction and prefix the log output with this text.
321 */
322#ifdef LOG_ENABLED
323static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC, const char *pszPrefix)
324#else
325static int emR3ExecuteInstructionWorker(PVM pVM, PVMCPU pVCpu, int rcGC)
326#endif
327{
328 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
329 int rc;
330
331 /*
332 *
333 * The simple solution is to use the recompiler.
334 * The better solution is to disassemble the current instruction and
335 * try to handle as many as possible without using REM.
336 *
337 */
338
339#ifdef LOG_ENABLED
340 /*
341 * Disassemble the instruction if requested.
342 */
343 if (pszPrefix)
344 {
345 DBGFR3InfoLog(pVM, "cpumguest", pszPrefix);
346 DBGFR3DisasInstrCurrentLog(pVCpu, pszPrefix);
347 }
348#endif /* LOG_ENABLED */
349
350 /*
351 * PATM is making life more interesting.
352 * We cannot hand anything to REM which has an EIP inside patch code. So, we'll
353 * tell PATM there is a trap in this code and have it take the appropriate actions
354 * to allow us to execute the code in REM.
355 */
356 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
357 {
358 Log(("emR3ExecuteInstruction: In patch block. eip=%RRv\n", (RTRCPTR)pCtx->eip));
359
360 RTGCPTR pNewEip;
361 rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
362 switch (rc)
363 {
364 /*
365 * It's not very useful to emulate a single instruction and then go back to raw
366 * mode; just execute the whole block until IF is set again.
367 */
368 case VINF_SUCCESS:
369 Log(("emR3ExecuteInstruction: Executing instruction starting at new address %RGv IF=%d VMIF=%x\n",
370 pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
371 pCtx->eip = pNewEip;
372 Assert(pCtx->eip);
373
374 if (pCtx->eflags.Bits.u1IF)
375 {
376 /*
377 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
378 */
379 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
380 return emR3ExecuteInstruction(pVM, pVCpu, "PATCHIR");
381 }
382 else if (rcGC == VINF_PATM_PENDING_IRQ_AFTER_IRET)
383 {
384 /* special case: iret, that sets IF, detected a pending irq/event */
385 return emR3ExecuteInstruction(pVM, pVCpu, "PATCHIRET");
386 }
387 return VINF_EM_RESCHEDULE_REM;
388
389 /*
390 * One instruction.
391 */
392 case VINF_PATCH_EMULATE_INSTR:
393 Log(("emR3ExecuteInstruction: Emulate patched instruction at %RGv IF=%d VMIF=%x\n",
394 pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
395 pCtx->eip = pNewEip;
396 return emR3ExecuteInstruction(pVM, pVCpu, "PATCHIR");
397
398 /*
399 * The patch was disabled, hand it to the REM.
400 */
401 case VERR_PATCH_DISABLED:
402 Log(("emR3ExecuteInstruction: Disabled patch -> new eip %RGv IF=%d VMIF=%x\n",
403 pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
404 pCtx->eip = pNewEip;
405 if (pCtx->eflags.Bits.u1IF)
406 {
407 /*
408 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
409 */
410 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
411 return emR3ExecuteInstruction(pVM, pVCpu, "PATCHIR");
412 }
413 return VINF_EM_RESCHEDULE_REM;
414
415 /* Force continued patch execution; usually due to a write-monitored stack. */
416 case VINF_PATCH_CONTINUE:
417 return VINF_SUCCESS;
418
419 default:
420 AssertReleaseMsgFailed(("Unknown return code %Rrc from PATMR3HandleTrap\n", rc));
421 return VERR_IPE_UNEXPECTED_STATUS;
422 }
423 }
424
425#if 0
426 /* Try our own instruction emulator before falling back to the recompiler. */
427 DISCPUSTATE Cpu;
428 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "GEN EMU");
429 if (RT_SUCCESS(rc))
430 {
431 uint32_t size;
432
433 switch (Cpu.pCurInstr->opcode)
434 {
435 /* @todo we can do more now */
436 case OP_MOV:
437 case OP_AND:
438 case OP_OR:
439 case OP_XOR:
440 case OP_POP:
441 case OP_INC:
442 case OP_DEC:
443 case OP_XCHG:
444 STAM_PROFILE_START(&pVCpu->em.s.StatMiscEmu, a);
445 rc = EMInterpretInstructionCPU(pVM, &Cpu, CPUMCTX2CORE(pCtx), 0, &size);
446 if (RT_SUCCESS(rc))
447 {
448 pCtx->rip += Cpu.opsize;
449 STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
450 return rc;
451 }
452 if (rc != VERR_EM_INTERPRETER)
453 AssertMsgFailedReturn(("rc=%Rrc\n", rc), rc);
454 STAM_PROFILE_STOP(&pVCpu->em.s.StatMiscEmu, a);
455 break;
456 }
457 }
458#endif /* 0 */
459 STAM_PROFILE_START(&pVCpu->em.s.StatREMEmu, a);
460 Log(("EMINS: %04x:%RGv RSP=%RGv\n", pCtx->cs, (RTGCPTR)pCtx->rip, (RTGCPTR)pCtx->rsp));
461 EMRemLock(pVM);
462 /* Flush the recompiler TLB if the VCPU has changed. */
463 if (pVM->em.s.idLastRemCpu != pVCpu->idCpu)
464 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_ALL);
465 pVM->em.s.idLastRemCpu = pVCpu->idCpu;
466
467 rc = REMR3EmulateInstruction(pVM, pVCpu);
468 EMRemUnlock(pVM);
469 STAM_PROFILE_STOP(&pVCpu->em.s.StatREMEmu, a);
470
471 return rc;
472}
473
474
475/**
476 * Executes one (or perhaps a few more) instruction(s).
477 * This is just a wrapper for discarding pszPrefix in non-logging builds.
478 *
479 * @returns VBox status code suitable for EM.
480 * @param pVM VM handle.
481 * @param pVCpu VMCPU handle.
482 * @param pszPrefix Disassembly prefix. If not NULL we'll disassemble the
483 * instruction and prefix the log output with this text.
484 * @param rcGC GC return code
485 */
486DECLINLINE(int) emR3ExecuteInstruction(PVM pVM, PVMCPU pVCpu, const char *pszPrefix, int rcGC)
487{
488#ifdef LOG_ENABLED
489 return emR3ExecuteInstructionWorker(pVM, pVCpu, rcGC, pszPrefix);
490#else
491 return emR3ExecuteInstructionWorker(pVM, pVCpu, rcGC);
492#endif
493}
494
495/**
496 * Executes one (or perhaps a few more) IO instruction(s).
497 *
498 * @returns VBox status code suitable for EM.
499 * @param pVM VM handle.
500 * @param pVCpu VMCPU handle.
501 */
502static int emR3ExecuteIOInstruction(PVM pVM, PVMCPU pVCpu)
503{
504 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
505
506 STAM_PROFILE_START(&pVCpu->em.s.StatIOEmu, a);
507
508 /** @todo probably we should fall back to the recompiler; otherwise we'll go back and forth between HC & GC
509 * as I/O instructions tend to come in packages of more than one
510 */
511 DISCPUSTATE Cpu;
512 int rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "IO EMU");
513 if (RT_SUCCESS(rc))
514 {
515 VBOXSTRICTRC rcStrict = VINF_EM_RAW_EMULATE_INSTR;
516
517 if (!(Cpu.prefix & (PREFIX_REP | PREFIX_REPNE)))
518 {
519 switch (Cpu.pCurInstr->opcode)
520 {
521 case OP_IN:
522 {
523 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
524 rcStrict = IOMInterpretIN(pVM, CPUMCTX2CORE(pCtx), &Cpu);
525 break;
526 }
527
528 case OP_OUT:
529 {
530 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
531 rcStrict = IOMInterpretOUT(pVM, CPUMCTX2CORE(pCtx), &Cpu);
532 break;
533 }
534 }
535 }
536 else if (Cpu.prefix & PREFIX_REP)
537 {
538 switch (Cpu.pCurInstr->opcode)
539 {
540 case OP_INSB:
541 case OP_INSWD:
542 {
543 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatIn);
544 rcStrict = IOMInterpretINS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
545 break;
546 }
547
548 case OP_OUTSB:
549 case OP_OUTSWD:
550 {
551 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatOut);
552 rcStrict = IOMInterpretOUTS(pVM, CPUMCTX2CORE(pCtx), &Cpu);
553 break;
554 }
555 }
556 }
557
558 /*
559 * Handle the I/O return codes.
560 * (The unhandled cases end up with rcStrict == VINF_EM_RAW_EMULATE_INSTR.)
561 */
562 if (IOM_SUCCESS(rcStrict))
563 {
564 pCtx->rip += Cpu.opsize;
565 STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
566 return VBOXSTRICTRC_TODO(rcStrict);
567 }
568
569 if (rcStrict == VINF_EM_RAW_GUEST_TRAP)
570 {
571 STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
572 rcStrict = emR3RawGuestTrap(pVM, pVCpu);
573 return VBOXSTRICTRC_TODO(rcStrict);
574 }
575 AssertMsg(rcStrict != VINF_TRPM_XCPT_DISPATCHED, ("Handle VINF_TRPM_XCPT_DISPATCHED\n"));
576
577 if (RT_FAILURE(rcStrict))
578 {
579 STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
580 return VBOXSTRICTRC_TODO(rcStrict);
581 }
582 AssertMsg(rcStrict == VINF_EM_RAW_EMULATE_INSTR || rcStrict == VINF_EM_RESCHEDULE_REM, ("rcStrict=%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
583 }
584 STAM_PROFILE_STOP(&pVCpu->em.s.StatIOEmu, a);
585 return emR3ExecuteInstruction(pVM, pVCpu, "IO: ");
586}
587
588
589/**
590 * Handle a guest context trap.
591 *
592 * @returns VBox status code suitable for EM.
593 * @param pVM VM handle.
594 * @param pVCpu VMCPU handle.
595 */
596static int emR3RawGuestTrap(PVM pVM, PVMCPU pVCpu)
597{
598 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
599
600 /*
601 * Get the trap info.
602 */
603 uint8_t u8TrapNo;
604 TRPMEVENT enmType;
605 RTGCUINT uErrorCode;
606 RTGCUINTPTR uCR2;
607 int rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
608 if (RT_FAILURE(rc))
609 {
610 AssertReleaseMsgFailed(("No trap! (rc=%Rrc)\n", rc));
611 return rc;
612 }
613
614
615#if 1 /* Experimental: Review, disable if it causes trouble. */
616 /*
617 * Handle traps in patch code first.
618 *
619 * We catch a few of these cases in RC before returning to R3 (#PF, #GP, #BP)
620 * but several traps aren't handled specially by TRPM in RC and we end up here
621 * instead. One example is #DE.
622 */
623 uint32_t uCpl = CPUMGetGuestCPL(pVCpu, CPUMCTX2CORE(pCtx));
624 if ( uCpl == 0
625 && PATMIsPatchGCAddr(pVM, pCtx->eip))
626 {
627 LogFlow(("emR3RawGuestTrap: trap %#x in patch code; eip=%08x\n", u8TrapNo, pCtx->eip));
628 return emR3PatchTrap(pVM, pVCpu, pCtx, rc);
629 }
630#endif
631
632 /*
633 * If the guest gate is marked unpatched, then we will check again if we can patch it.
634 * (This assumes that we've already tried and failed to dispatch the trap in
635 * RC for the gates that have already been patched, which is true for most high
636 * volume traps, because these are handled specially, but not for odd ones like #DE.)
637 */
638 if (TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) == TRPM_INVALID_HANDLER)
639 {
640 CSAMR3CheckGates(pVM, u8TrapNo, 1);
641 Log(("emR3RawHandleRC: recheck gate %x -> valid=%d\n", u8TrapNo, TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) != TRPM_INVALID_HANDLER));
642
643 /* If it was successful, then we could go back to raw mode. */
644 if (TRPMR3GetGuestTrapHandler(pVM, u8TrapNo) != TRPM_INVALID_HANDLER)
645 {
646 /* Must check pending forced actions as our IDT or GDT might be out of sync. */
647 rc = EMR3CheckRawForcedActions(pVM, pVCpu);
648 AssertRCReturn(rc, rc);
649
650 TRPMERRORCODE enmError = uErrorCode != ~0U
651 ? TRPM_TRAP_HAS_ERRORCODE
652 : TRPM_TRAP_NO_ERRORCODE;
653 rc = TRPMForwardTrap(pVCpu, CPUMCTX2CORE(pCtx), u8TrapNo, uErrorCode, enmError, TRPM_TRAP, -1);
654 if (rc == VINF_SUCCESS /* Don't use RT_SUCCESS */)
655 {
656 TRPMResetTrap(pVCpu);
657 return VINF_EM_RESCHEDULE_RAW;
658 }
659 AssertMsg(rc == VINF_EM_RAW_GUEST_TRAP, ("%Rrc\n", rc));
660 }
661 }
662
663 /*
664 * Scan kernel code that traps; we might not get another chance.
665 */
666 /** @todo move this up before the dispatching? */
667 if ( (pCtx->ss & X86_SEL_RPL) <= 1
668 && !pCtx->eflags.Bits.u1VM)
669 {
670 Assert(!PATMIsPatchGCAddr(pVM, pCtx->eip));
671 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
672 }
673
674 /*
675 * Trap specific handling.
676 */
677 if (u8TrapNo == 6) /* (#UD) Invalid opcode. */
678 {
679 /*
680 * If MONITOR & MWAIT are supported, then interpret them here.
681 */
682 DISCPUSTATE cpu;
683 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &cpu, "Guest Trap (#UD): ");
684 if ( RT_SUCCESS(rc)
685 && (cpu.pCurInstr->opcode == OP_MONITOR || cpu.pCurInstr->opcode == OP_MWAIT))
686 {
687 uint32_t u32Dummy, u32Features, u32ExtFeatures;
688 CPUMGetGuestCpuId(pVCpu, 1, &u32Dummy, &u32Dummy, &u32ExtFeatures, &u32Features);
689 if (u32ExtFeatures & X86_CPUID_FEATURE_ECX_MONITOR)
690 {
691 rc = TRPMResetTrap(pVCpu);
692 AssertRC(rc);
693
694 uint32_t opsize;
695 rc = EMInterpretInstructionCPU(pVM, pVCpu, &cpu, CPUMCTX2CORE(pCtx), 0, &opsize);
696 if (RT_SUCCESS(rc))
697 {
698 pCtx->rip += cpu.opsize;
699 return rc;
700 }
701 return emR3ExecuteInstruction(pVM, pVCpu, "Monitor: ");
702 }
703 }
704 }
705 else if (u8TrapNo == 13) /* (#GP) Privileged exception */
706 {
707 /*
708 * Handle I/O bitmap?
709 */
710 /** @todo We're not supposed to be here with a false guest trap concerning
711 * I/O access. We can easily handle those in RC. */
712 DISCPUSTATE cpu;
713 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &cpu, "Guest Trap: ");
714 if ( RT_SUCCESS(rc)
715 && (cpu.pCurInstr->optype & OPTYPE_PORTIO))
716 {
717 /*
718 * We should really check the TSS for the IO bitmap, but it's not like this
719 * lazy approach really makes things worse.
720 */
721 rc = TRPMResetTrap(pVCpu);
722 AssertRC(rc);
723 return emR3ExecuteInstruction(pVM, pVCpu, "IO Guest Trap: ");
724 }
725 }
726
727#ifdef LOG_ENABLED
728 DBGFR3InfoLog(pVM, "cpumguest", "Guest trap");
729 DBGFR3DisasInstrCurrentLog(pVCpu, "Guest trap");
730
731 /* Get guest page information. */
732 uint64_t fFlags = 0;
733 RTGCPHYS GCPhys = 0;
734 int rc2 = PGMGstGetPage(pVCpu, uCR2, &fFlags, &GCPhys);
735 Log(("emR3RawGuestTrap: cs:eip=%04x:%08x: trap=%02x err=%08x cr2=%08x cr0=%08x%s: Phys=%RGp fFlags=%08llx %s %s %s%s rc2=%d\n",
736 pCtx->cs, pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0, (enmType == TRPM_SOFTWARE_INT) ? " software" : "", GCPhys, fFlags,
737 fFlags & X86_PTE_P ? "P " : "NP", fFlags & X86_PTE_US ? "U" : "S",
738 fFlags & X86_PTE_RW ? "RW" : "R0", fFlags & X86_PTE_G ? " G" : "", rc2));
739#endif
740
741 /*
742 * #PF has CR2.
743 * (Because of stuff like above we must set CR2 in a delayed fashion.)
744 */
745 if (u8TrapNo == 14 /* #PF */)
746 pCtx->cr2 = uCR2;
747
748 return VINF_EM_RESCHEDULE_REM;
749}
750
751
752/**
753 * Handle a ring switch trap.
754 * Need to do statistics and to install patches. The result is going to REM.
755 *
756 * @returns VBox status code suitable for EM.
757 * @param pVM VM handle.
758 * @param pVCpu VMCPU handle.
759 */
760static int emR3RawRingSwitch(PVM pVM, PVMCPU pVCpu)
761{
762 int rc;
763 DISCPUSTATE Cpu;
764 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
765
766 /*
767 * sysenter, syscall & callgate
768 */
769 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "RSWITCH: ");
770 if (RT_SUCCESS(rc))
771 {
772 if (Cpu.pCurInstr->opcode == OP_SYSENTER)
773 {
774 if (pCtx->SysEnter.cs != 0)
775 {
776 rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
777 (SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0);
778 if (RT_SUCCESS(rc))
779 {
780 DBGFR3DisasInstrCurrentLog(pVCpu, "Patched sysenter instruction");
781 return VINF_EM_RESCHEDULE_RAW;
782 }
783 }
784 }
785
786#ifdef VBOX_WITH_STATISTICS
787 switch (Cpu.pCurInstr->opcode)
788 {
789 case OP_SYSENTER:
790 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysEnter);
791 break;
792 case OP_SYSEXIT:
793 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysExit);
794 break;
795 case OP_SYSCALL:
796 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysCall);
797 break;
798 case OP_SYSRET:
799 STAM_COUNTER_INC(&pVCpu->em.s.CTX_SUFF(pStats)->StatSysRet);
800 break;
801 }
802#endif
803 }
804 else
805 AssertRC(rc);
806
807 /* go to the REM to emulate a single instruction */
808 return emR3ExecuteInstruction(pVM, pVCpu, "RSWITCH: ");
809}
810
811
812/**
813 * Handle a trap (\#PF or \#GP) in patch code
814 *
815 * @returns VBox status code suitable for EM.
816 * @param pVM VM handle.
817 * @param pVCpu VMCPU handle.
818 * @param pCtx CPU context
819 * @param gcret GC return code
820 */
821static int emR3PatchTrap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int gcret)
822{
823 uint8_t u8TrapNo;
824 int rc;
825 TRPMEVENT enmType;
826 RTGCUINT uErrorCode;
827 RTGCUINTPTR uCR2;
828
829 Assert(PATMIsPatchGCAddr(pVM, pCtx->eip));
830
831 if (gcret == VINF_PATM_PATCH_INT3)
832 {
833 u8TrapNo = 3;
834 uCR2 = 0;
835 uErrorCode = 0;
836 }
837 else if (gcret == VINF_PATM_PATCH_TRAP_GP)
838 {
839 /* No active trap in this case. Kind of ugly. */
840 u8TrapNo = X86_XCPT_GP;
841 uCR2 = 0;
842 uErrorCode = 0;
843 }
844 else
845 {
846 rc = TRPMQueryTrapAll(pVCpu, &u8TrapNo, &enmType, &uErrorCode, &uCR2);
847 if (RT_FAILURE(rc))
848 {
849 AssertReleaseMsgFailed(("emR3PatchTrap: no trap! (rc=%Rrc) gcret=%Rrc\n", rc, gcret));
850 return rc;
851 }
852 /* Reset the trap as we'll execute the original instruction again. */
853 TRPMResetTrap(pVCpu);
854 }
855
856 /*
857 * Deal with traps inside patch code.
858 * (This code won't run outside GC.)
859 */
860 if (u8TrapNo != 1)
861 {
862#ifdef LOG_ENABLED
863 DBGFR3InfoLog(pVM, "cpumguest", "Trap in patch code");
864 DBGFR3DisasInstrCurrentLog(pVCpu, "Patch code");
865
866 DISCPUSTATE Cpu;
867 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->eip, &Cpu, "Patch code: ");
868 if ( RT_SUCCESS(rc)
869 && Cpu.pCurInstr->opcode == OP_IRET)
870 {
871 uint32_t eip, selCS, uEFlags;
872
873 /* Iret crashes are bad as we have already changed the flags on the stack */
874 rc = PGMPhysSimpleReadGCPtr(pVCpu, &eip, pCtx->esp, 4);
875 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selCS, pCtx->esp+4, 4);
876 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &uEFlags, pCtx->esp+8, 4);
877 if (rc == VINF_SUCCESS)
878 {
879 if ( (uEFlags & X86_EFL_VM)
880 || (selCS & X86_SEL_RPL) == 3)
881 {
882 uint32_t selSS, esp;
883
884 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &esp, pCtx->esp + 12, 4);
885 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selSS, pCtx->esp + 16, 4);
886
887 if (uEFlags & X86_EFL_VM)
888 {
889 uint32_t selDS, selES, selFS, selGS;
890 rc = PGMPhysSimpleReadGCPtr(pVCpu, &selES, pCtx->esp + 20, 4);
891 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selDS, pCtx->esp + 24, 4);
892 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selFS, pCtx->esp + 28, 4);
893 rc |= PGMPhysSimpleReadGCPtr(pVCpu, &selGS, pCtx->esp + 32, 4);
894 if (rc == VINF_SUCCESS)
895 {
896 Log(("Patch code: IRET->VM stack frame: return address %04X:%08RX32 eflags=%08x ss:esp=%04X:%08RX32\n", selCS, eip, uEFlags, selSS, esp));
897 Log(("Patch code: IRET->VM stack frame: DS=%04X ES=%04X FS=%04X GS=%04X\n", selDS, selES, selFS, selGS));
898 }
899 }
900 else
901 Log(("Patch code: IRET stack frame: return address %04X:%08RX32 eflags=%08x ss:esp=%04X:%08RX32\n", selCS, eip, uEFlags, selSS, esp));
902 }
903 else
904 Log(("Patch code: IRET stack frame: return address %04X:%08RX32 eflags=%08x\n", selCS, eip, uEFlags));
905 }
906 }
907#endif /* LOG_ENABLED */
908 Log(("emR3PatchTrap: in patch: eip=%08x: trap=%02x err=%08x cr2=%08x cr0=%08x\n",
909 pCtx->eip, u8TrapNo, uErrorCode, uCR2, (uint32_t)pCtx->cr0));
910
911 RTGCPTR pNewEip;
912 rc = PATMR3HandleTrap(pVM, pCtx, pCtx->eip, &pNewEip);
913 switch (rc)
914 {
915 /*
916 * Execute the faulting instruction.
917 */
918 case VINF_SUCCESS:
919 {
920 /** @todo execute a whole block */
921 Log(("emR3PatchTrap: Executing faulting instruction at new address %RGv\n", pNewEip));
922 if (!(pVCpu->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
923 Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));
924
925 pCtx->eip = pNewEip;
926 AssertRelease(pCtx->eip);
927
928 if (pCtx->eflags.Bits.u1IF)
929 {
930 /* Windows XP lets irets fault intentionally and then takes action based on the opcode; an
931 * int3 patch overwrites it and leads to blue screens. Remove the patch in this case.
932 */
933 if ( u8TrapNo == X86_XCPT_GP
934 && PATMIsInt3Patch(pVM, pCtx->eip, NULL, NULL))
935 {
936 /** @todo move to PATMR3HandleTrap */
937 Log(("Possible Windows XP iret fault at %08RX32\n", pCtx->eip));
938 PATMR3RemovePatch(pVM, pCtx->eip);
939 }
940
941 /** @todo Knoppix 5 regression when returning VINF_SUCCESS here and going back to raw mode. */
942 /* Note: possibly because a reschedule is required (e.g. iret to V86 code) */
943
944 return emR3ExecuteInstruction(pVM, pVCpu, "PATCHIR");
945 /* Interrupts are enabled; just go back to the original instruction.
946 return VINF_SUCCESS; */
947 }
948 return VINF_EM_RESCHEDULE_REM;
949 }
950
951 /*
952 * One instruction.
953 */
954 case VINF_PATCH_EMULATE_INSTR:
955 Log(("emR3PatchTrap: Emulate patched instruction at %RGv IF=%d VMIF=%x\n",
956 pNewEip, pCtx->eflags.Bits.u1IF, pVCpu->em.s.pPatmGCState->uVMFlags));
957 pCtx->eip = pNewEip;
958 AssertRelease(pCtx->eip);
959 return emR3ExecuteInstruction(pVM, pVCpu, "PATCHEMUL: ");
960
961 /*
962 * The patch was disabled, hand it to the REM.
963 */
964 case VERR_PATCH_DISABLED:
965 if (!(pVCpu->em.s.pPatmGCState->uVMFlags & X86_EFL_IF))
966 Log(("emR3PatchTrap: Virtual IF flag disabled!!\n"));
967 pCtx->eip = pNewEip;
968 AssertRelease(pCtx->eip);
969
970 if (pCtx->eflags.Bits.u1IF)
971 {
972 /*
973 * The last instruction in the patch block needs to be executed!! (sti/sysexit for example)
974 */
975 Log(("PATCH: IF=1 -> emulate last instruction as it can't be interrupted!!\n"));
976 return emR3ExecuteInstruction(pVM, pVCpu, "PATCHIR");
977 }
978 return VINF_EM_RESCHEDULE_REM;
979
980 /* Force continued patch execution; usually due to a write-monitored stack. */
981 case VINF_PATCH_CONTINUE:
982 return VINF_SUCCESS;
983
984 /*
985 * Anything else is *fatal*.
986 */
987 default:
988 AssertReleaseMsgFailed(("Unknown return code %Rrc from PATMR3HandleTrap!\n", rc));
989 return VERR_IPE_UNEXPECTED_STATUS;
990 }
991 }
992 return VINF_SUCCESS;
993}
994
995
996/**
997 * Handle a privileged instruction.
998 *
999 * @returns VBox status code suitable for EM.
1000 * @param pVM VM handle.
1001 * @param pVCpu VMCPU handle.
1002 */
1003static int emR3RawPrivileged(PVM pVM, PVMCPU pVCpu)
1004{
1005 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1006
1007 Assert(!pCtx->eflags.Bits.u1VM);
1008
1009 if (PATMIsEnabled(pVM))
1010 {
1011 /*
1012 * Check if in patch code.
1013 */
1014 if (PATMR3IsInsidePatchJump(pVM, pCtx->eip, NULL))
1015 {
1016#ifdef LOG_ENABLED
1017 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
1018#endif
1019 AssertMsgFailed(("FATAL ERROR: executing random instruction inside generated patch jump %08X\n", pCtx->eip));
1020 return VERR_EM_RAW_PATCH_CONFLICT;
1021 }
1022 if ( (pCtx->ss & X86_SEL_RPL) == 0
1023 && !pCtx->eflags.Bits.u1VM
1024 && !PATMIsPatchGCAddr(pVM, pCtx->eip))
1025 {
1026 int rc = PATMR3InstallPatch(pVM, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->eip),
1027 (SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT) ? PATMFL_CODE32 : 0);
1028 if (RT_SUCCESS(rc))
1029 {
1030#ifdef LOG_ENABLED
1031 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
1032#endif
1033 DBGFR3DisasInstrCurrentLog(pVCpu, "Patched privileged instruction");
1034 return VINF_SUCCESS;
1035 }
1036 }
1037 }
1038
1039#ifdef LOG_ENABLED
1040 if (!PATMIsPatchGCAddr(pVM, pCtx->eip))
1041 {
1042 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
1043 DBGFR3DisasInstrCurrentLog(pVCpu, "Privileged instr: ");
1044 }
1045#endif
1046
1047 /*
1048 * Instruction statistics and logging.
1049 */
1050 DISCPUSTATE Cpu;
1051 int rc;
1052
1053 rc = CPUMR3DisasmInstrCPU(pVM, pVCpu, pCtx, pCtx->rip, &Cpu, "PRIV: ");
1054 if (RT_SUCCESS(rc))
1055 {
1056#ifdef VBOX_WITH_STATISTICS
1057 PEMSTATS pStats = pVCpu->em.s.CTX_SUFF(pStats);
1058 switch (Cpu.pCurInstr->opcode)
1059 {
1060 case OP_INVLPG:
1061 STAM_COUNTER_INC(&pStats->StatInvlpg);
1062 break;
1063 case OP_IRET:
1064 STAM_COUNTER_INC(&pStats->StatIret);
1065 break;
1066 case OP_CLI:
1067 STAM_COUNTER_INC(&pStats->StatCli);
1068 emR3RecordCli(pVM, pVCpu, pCtx->rip);
1069 break;
1070 case OP_STI:
1071 STAM_COUNTER_INC(&pStats->StatSti);
1072 break;
1073 case OP_INSB:
1074 case OP_INSWD:
1075 case OP_IN:
1076 case OP_OUTSB:
1077 case OP_OUTSWD:
1078 case OP_OUT:
1079 AssertMsgFailed(("Unexpected privileged exception due to port IO\n"));
1080 break;
1081
1082 case OP_MOV_CR:
1083 if (Cpu.param1.flags & USE_REG_GEN32)
1084 {
1085 //read
1086 Assert(Cpu.param2.flags & USE_REG_CR);
1087 Assert(Cpu.param2.base.reg_ctrl <= USE_REG_CR4);
1088 STAM_COUNTER_INC(&pStats->StatMovReadCR[Cpu.param2.base.reg_ctrl]);
1089 }
1090 else
1091 {
1092 //write
1093 Assert(Cpu.param1.flags & USE_REG_CR);
1094 Assert(Cpu.param1.base.reg_ctrl <= USE_REG_CR4);
1095 STAM_COUNTER_INC(&pStats->StatMovWriteCR[Cpu.param1.base.reg_ctrl]);
1096 }
1097 break;
1098
1099 case OP_MOV_DR:
1100 STAM_COUNTER_INC(&pStats->StatMovDRx);
1101 break;
1102 case OP_LLDT:
1103 STAM_COUNTER_INC(&pStats->StatMovLldt);
1104 break;
1105 case OP_LIDT:
1106 STAM_COUNTER_INC(&pStats->StatMovLidt);
1107 break;
1108 case OP_LGDT:
1109 STAM_COUNTER_INC(&pStats->StatMovLgdt);
1110 break;
1111 case OP_SYSENTER:
1112 STAM_COUNTER_INC(&pStats->StatSysEnter);
1113 break;
1114 case OP_SYSEXIT:
1115 STAM_COUNTER_INC(&pStats->StatSysExit);
1116 break;
1117 case OP_SYSCALL:
1118 STAM_COUNTER_INC(&pStats->StatSysCall);
1119 break;
1120 case OP_SYSRET:
1121 STAM_COUNTER_INC(&pStats->StatSysRet);
1122 break;
1123 case OP_HLT:
1124 STAM_COUNTER_INC(&pStats->StatHlt);
1125 break;
1126 default:
1127 STAM_COUNTER_INC(&pStats->StatMisc);
1128 Log4(("emR3RawPrivileged: opcode=%d\n", Cpu.pCurInstr->opcode));
1129 break;
1130 }
1131#endif /* VBOX_WITH_STATISTICS */
1132 if ( (pCtx->ss & X86_SEL_RPL) == 0
1133 && !pCtx->eflags.Bits.u1VM
1134 && SELMGetCpuModeFromSelector(pVM, pCtx->eflags, pCtx->cs, &pCtx->csHid) == CPUMODE_32BIT)
1135 {
1136 uint32_t size;
1137
1138 STAM_PROFILE_START(&pVCpu->em.s.StatPrivEmu, a);
1139 switch (Cpu.pCurInstr->opcode)
1140 {
1141 case OP_CLI:
1142 pCtx->eflags.u32 &= ~X86_EFL_IF;
1143 Assert(Cpu.opsize == 1);
1144 pCtx->rip += Cpu.opsize;
1145 STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
1146 return VINF_EM_RESCHEDULE_REM; /* must go to the recompiler now! */
1147
1148 case OP_STI:
1149 pCtx->eflags.u32 |= X86_EFL_IF;
1150 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip + Cpu.opsize);
1151 Assert(Cpu.opsize == 1);
1152 pCtx->rip += Cpu.opsize;
1153 STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
1154 return VINF_SUCCESS;
1155
1156 case OP_HLT:
1157 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
1158 {
1159 PATMTRANSSTATE enmState;
1160 RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->eip, &enmState);
1161
1162 if (enmState == PATMTRANS_OVERWRITTEN)
1163 {
1164 rc = PATMR3DetectConflict(pVM, pOrgInstrGC, pOrgInstrGC);
1165 Assert(rc == VERR_PATCH_DISABLED);
1166 /* Conflict detected, patch disabled */
1167 Log(("emR3RawPrivileged: detected conflict -> disabled patch at %08RX32\n", pCtx->eip));
1168
1169 enmState = PATMTRANS_SAFE;
1170 }
1171
1172 /* The translation had better be successful. Otherwise we can't recover. */
1173 AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %08RX32\n", pCtx->eip));
1174 if (enmState != PATMTRANS_OVERWRITTEN)
1175 pCtx->eip = pOrgInstrGC;
1176 }
1177 /* no break; we could just return VINF_EM_HALT here */
1178
1179 case OP_MOV_CR:
1180 case OP_MOV_DR:
1181#ifdef LOG_ENABLED
1182 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
1183 {
1184 DBGFR3InfoLog(pVM, "cpumguest", "PRIV");
1185 DBGFR3DisasInstrCurrentLog(pVCpu, "Privileged instr: ");
1186 }
1187#endif
1188
1189 rc = EMInterpretInstructionCPU(pVM, pVCpu, &Cpu, CPUMCTX2CORE(pCtx), 0, &size);
1190 if (RT_SUCCESS(rc))
1191 {
1192 pCtx->rip += Cpu.opsize;
1193 STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
1194
1195 if ( Cpu.pCurInstr->opcode == OP_MOV_CR
1196 && Cpu.param1.flags == USE_REG_CR /* write */
1197 )
1198 {
1199 /* Deal with CR0 updates inside patch code that force
1200 * us to go to the recompiler.
1201 */
1202 if ( PATMIsPatchGCAddr(pVM, pCtx->rip)
1203 && (pCtx->cr0 & (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE)) != (X86_CR0_WP|X86_CR0_PG|X86_CR0_PE))
1204 {
1205 PATMTRANSSTATE enmState;
1206 RTGCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtx->rip, &enmState);
1207
1208 Log(("Force recompiler switch due to cr0 (%RGp) update rip=%RGv -> %RGv (enmState=%d)\n", pCtx->cr0, pCtx->rip, pOrgInstrGC, enmState));
1209 if (enmState == PATMTRANS_OVERWRITTEN)
1210 {
1211 rc = PATMR3DetectConflict(pVM, pOrgInstrGC, pOrgInstrGC);
1212 Assert(rc == VERR_PATCH_DISABLED);
1213 /* Conflict detected, patch disabled */
1214 Log(("emR3RawPrivileged: detected conflict -> disabled patch at %RGv\n", (RTGCPTR)pCtx->rip));
1215 enmState = PATMTRANS_SAFE;
1216 }
1217 /* The translation had better be successful. Otherwise we can't recover. */
1218 AssertReleaseMsg(pOrgInstrGC && enmState != PATMTRANS_OVERWRITTEN, ("Unable to translate instruction address at %RGv\n", (RTGCPTR)pCtx->rip));
1219 if (enmState != PATMTRANS_OVERWRITTEN)
1220 pCtx->rip = pOrgInstrGC;
1221 }
1222
1223 /* Reschedule is necessary as the execution/paging mode might have changed. */
1224 return VINF_EM_RESCHEDULE;
1225 }
1226 return rc; /* can return VINF_EM_HALT as well. */
1227 }
1228 AssertMsgReturn(rc == VERR_EM_INTERPRETER, ("%Rrc\n", rc), rc);
1229 break; /* fall back to the recompiler */
1230 }
1231 STAM_PROFILE_STOP(&pVCpu->em.s.StatPrivEmu, a);
1232 }
1233 }
1234
1235 if (PATMIsPatchGCAddr(pVM, pCtx->eip))
1236 return emR3PatchTrap(pVM, pVCpu, pCtx, VINF_PATM_PATCH_TRAP_GP);
1237
1238 return emR3ExecuteInstruction(pVM, pVCpu, "PRIV");
1239}
1240
1241
1242/**
1243 * Update the forced rawmode execution modifier.
1244 *
1245 * This function is called when we're returning from the raw-mode loop(s). If we're
1246 * in patch code, it will set a flag forcing execution to be resumed in raw-mode;
1247 * if not in patch code, the flag will be cleared.
1248 *
1249 * We should never interrupt patch code while it's being executed. Cli patches can
1250 * contain big code blocks, but they are always executed with IF=0. Other patches
1251 * replace single instructions and should be atomic.
1252 *
1253 * @returns Updated rc.
1254 *
1255 * @param pVM The VM handle.
1256 * @param pVCpu The VMCPU handle.
1257 * @param pCtx The guest CPU context.
1258 * @param rc The result code.
1259 */
1260int emR3RawUpdateForceFlag(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, int rc)
1261{
1262 if (PATMIsPatchGCAddr(pVM, pCtx->eip)) /** @todo check cs selector base/type */
1263 {
1264 /* ignore reschedule attempts. */
1265 switch (rc)
1266 {
1267 case VINF_EM_RESCHEDULE:
1268 case VINF_EM_RESCHEDULE_REM:
1269 LogFlow(("emR3RawUpdateForceFlag: patch address -> force raw reschedule\n"));
1270 rc = VINF_SUCCESS;
1271 break;
1272 }
1273 pVCpu->em.s.fForceRAW = true;
1274 }
1275 else
1276 pVCpu->em.s.fForceRAW = false;
1277 return rc;
1278}
1279
1280
1281/**
1282 * Check for pending raw actions
1283 *
1284 * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
1285 * EM statuses.
1286 * @param pVM The VM to operate on.
1287 * @param pVCpu The VMCPU handle.
1288 */
1289VMMR3DECL(int) EMR3CheckRawForcedActions(PVM pVM, PVMCPU pVCpu)
1290{
1291 return emR3RawForcedActions(pVM, pVCpu, pVCpu->em.s.pCtx);
1292}
1293
1294
1295/**
1296 * Process raw-mode specific forced actions.
1297 *
1298 * This function is called when any FFs in the VM_FF_HIGH_PRIORITY_PRE_RAW_MASK are pending.
1299 *
1300 * @returns VBox status code. May return VINF_EM_NO_MEMORY but none of the other
1301 * EM statuses.
1302 * @param pVM The VM handle.
1303 * @param pVCpu The VMCPU handle.
1304 * @param pCtx The guest CPUM register context.
1305 */
1306static int emR3RawForcedActions(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1307{
1308 /*
1309 * Note that the order is *vitally* important!
1310 * Also note that SELMR3UpdateFromCPUM may trigger VM_FF_SELM_SYNC_TSS.
1311 */
1312
1313
1314 /*
1315 * Sync selector tables.
1316 */
1317 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT))
1318 {
1319 int rc = SELMR3UpdateFromCPUM(pVM, pVCpu);
1320 if (RT_FAILURE(rc))
1321 return rc;
1322 }
1323
1324 /*
1325 * Sync IDT.
1326 *
1327 * The CSAMR3CheckGates call in TRPMR3SyncIDT may call PGMPrefetchPage
1328 * and PGMShwModifyPage, so we're in for trouble if for instance a
1329 * PGMSyncCR3+pgmR3PoolClearAll is pending.
1330 */
1331 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TRPM_SYNC_IDT))
1332 {
1333 if ( VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3)
1334 && EMIsRawRing0Enabled(pVM)
1335 && CSAMIsEnabled(pVM))
1336 {
1337 int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
1338 if (RT_FAILURE(rc))
1339 return rc;
1340 }
1341
1342 int rc = TRPMR3SyncIDT(pVM, pVCpu);
1343 if (RT_FAILURE(rc))
1344 return rc;
1345 }
1346
1347 /*
1348 * Sync TSS.
1349 */
1350 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS))
1351 {
1352 int rc = SELMR3SyncTSS(pVM, pVCpu);
1353 if (RT_FAILURE(rc))
1354 return rc;
1355 }
1356
1357 /*
1358 * Sync page directory.
1359 */
1360 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
1361 {
1362 Assert(pVCpu->em.s.enmState != EMSTATE_WAIT_SIPI);
1363 int rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
1364 if (RT_FAILURE(rc))
1365 return rc;
1366
1367 Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
1368
1369 /* Prefetch pages for EIP and ESP. */
1370 /** @todo This is rather expensive. Should investigate if it really helps at all. */
1371 rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DIS_SELREG_CS, CPUMCTX2CORE(pCtx), pCtx->rip));
1372 if (rc == VINF_SUCCESS)
1373 rc = PGMPrefetchPage(pVCpu, SELMToFlat(pVM, DIS_SELREG_SS, CPUMCTX2CORE(pCtx), pCtx->rsp));
1374 if (rc != VINF_SUCCESS)
1375 {
1376 if (rc != VINF_PGM_SYNC_CR3)
1377 {
1378 AssertLogRelMsgReturn(RT_FAILURE(rc), ("%Rrc\n", rc), VERR_IPE_UNEXPECTED_INFO_STATUS);
1379 return rc;
1380 }
1381 rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
1382 if (RT_FAILURE(rc))
1383 return rc;
1384 }
1385 /** @todo maybe prefetch the supervisor stack page as well */
1386 Assert(!VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
1387 }
1388
1389 /*
1390 * Allocate handy pages (just in case the above actions have consumed some pages).
1391 */
1392 if (VM_FF_IS_PENDING_EXCEPT(pVM, VM_FF_PGM_NEED_HANDY_PAGES, VM_FF_PGM_NO_MEMORY))
1393 {
1394 int rc = PGMR3PhysAllocateHandyPages(pVM);
1395 if (RT_FAILURE(rc))
1396 return rc;
1397 }
1398
1399 /*
1400 * Check whether we're out of memory now.
1401 *
1402 * This may stem from some of the above actions or operations that have been executed
1403 * since we ran FFs. The handy page allocation must, for instance, always be followed by
1404 * this check.
1405 */
1406 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY))
1407 return VINF_EM_NO_MEMORY;
1408
1409 return VINF_SUCCESS;
1410}
1411
1412
1413/**
1414 * Executes raw code.
1415 *
1416 * This function contains the raw-mode version of the inner
1417 * execution loop (the outer loop being in EMR3ExecuteVM()).
1418 *
1419 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE,
1420 * VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
1421 *
1422 * @param pVM VM handle.
1423 * @param pVCpu VMCPU handle.
1424 * @param pfFFDone Where to store an indicator telling whether or not
1425 * FFs were done before returning.
1426 */
1427int emR3RawExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
1428{
1429 STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatRAWTotal, a);
1430
1431 int rc = VERR_INTERNAL_ERROR;
1432 PCPUMCTX pCtx = pVCpu->em.s.pCtx;
1433 LogFlow(("emR3RawExecute: (cs:eip=%04x:%08x)\n", pCtx->cs, pCtx->eip));
1434 pVCpu->em.s.fForceRAW = false;
1435 *pfFFDone = false;
1436
1437
1438 /*
1439 *
1440 * Spin till we get a forced action or a raw-mode status code resulting
1441 * in anything but VINF_SUCCESS or VINF_EM_RESCHEDULE_RAW.
1442 *
1443 */
1444 for (;;)
1445 {
1446 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatRAWEntry, b);
1447
1448 /*
1449 * Check various preconditions.
1450 */
1451#ifdef VBOX_STRICT
1452 Assert(REMR3QueryPendingInterrupt(pVM, pVCpu) == REM_NO_PENDING_IRQ);
1453 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss & X86_SEL_RPL) == 3 || (pCtx->ss & X86_SEL_RPL) == 0);
1454 AssertMsg( (pCtx->eflags.u32 & X86_EFL_IF)
1455 || PATMShouldUseRawMode(pVM, (RTGCPTR)pCtx->eip),
1456 ("Tried to execute code with IF at EIP=%08x!\n", pCtx->eip));
1457 if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1458 && PGMMapHasConflicts(pVM))
1459 {
1460 PGMMapCheck(pVM);
1461 AssertMsgFailed(("We should not get conflicts any longer!!!\n"));
1462 return VERR_INTERNAL_ERROR;
1463 }
1464#endif /* VBOX_STRICT */
1465
1466 /*
1467 * Process high priority pre-execution raw-mode FFs.
1468 */
1469 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
1470 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
1471 {
1472 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
1473 if (rc != VINF_SUCCESS)
1474 break;
1475 }
1476
1477 /*
1478 * If we're going to execute ring-0 code, the guest state needs to
1479 * be modified a bit and some of the state components (IF, SS/CS RPL,
1480 * and perhaps EIP) need to be stored with PATM.
1481 */
1482 rc = CPUMRawEnter(pVCpu, NULL);
1483 if (rc != VINF_SUCCESS)
1484 {
1485 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWEntry, b);
1486 break;
1487 }
1488
1489 /*
1490 * Scan code before executing it. Don't bother with user mode or V86 code.
1491 */
1492 if ( (pCtx->ss & X86_SEL_RPL) <= 1
1493 && !pCtx->eflags.Bits.u1VM
1494 && !PATMIsPatchGCAddr(pVM, pCtx->eip))
1495 {
1496 STAM_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatRAWEntry, b);
1497 CSAMR3CheckCodeEx(pVM, CPUMCTX2CORE(pCtx), pCtx->eip);
1498 STAM_PROFILE_ADV_RESUME(&pVCpu->em.s.StatRAWEntry, b);
1499 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
1500 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
1501 {
1502 rc = emR3RawForcedActions(pVM, pVCpu, pCtx);
1503 if (rc != VINF_SUCCESS)
1504 {
1505 rc = CPUMRawLeave(pVCpu, NULL, rc);
1506 break;
1507 }
1508 }
1509 }
1510
1511#ifdef LOG_ENABLED
1512 /*
1513 * Log important stuff before entering GC.
1514 */
1515 PPATMGCSTATE pGCState = PATMR3QueryGCStateHC(pVM);
1516 if (pCtx->eflags.Bits.u1VM)
1517 Log(("RV86: %04X:%08X IF=%d VMFlags=%x\n", pCtx->cs, pCtx->eip, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
1518 else if ((pCtx->ss & X86_SEL_RPL) == 1)
1519 {
1520 bool fCSAMScanned = CSAMIsPageScanned(pVM, (RTGCPTR)pCtx->eip);
1521 Log(("RR0: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d (Scanned=%d)\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss & X86_SEL_RPL), fCSAMScanned));
1522 }
1523 else if ((pCtx->ss & X86_SEL_RPL) == 3)
1524 Log(("RR3: %08X ESP=%08X IF=%d VMFlags=%x\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags));
1525#endif /* LOG_ENABLED */
1526
1527
1528
1529 /*
1530 * Execute the code.
1531 */
1532 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWEntry, b);
1533 STAM_PROFILE_START(&pVCpu->em.s.StatRAWExec, c);
1534 rc = VMMR3RawRunGC(pVM, pVCpu);
1535 STAM_PROFILE_STOP(&pVCpu->em.s.StatRAWExec, c);
1536 STAM_PROFILE_ADV_START(&pVCpu->em.s.StatRAWTail, d);
1537
1538 LogFlow(("RR0-E: %08X ESP=%08X IF=%d VMFlags=%x PIF=%d CPL=%d\n", pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pGCState->uVMFlags, pGCState->fPIF, (pCtx->ss & X86_SEL_RPL)));
1539 LogFlow(("VMMR3RawRunGC returned %Rrc\n", rc));
1540
1541
1542
1543 /*
1544 * Restore the real CPU state and deal with high priority post
1545 * execution FFs before doing anything else.
1546 */
1547 rc = CPUMRawLeave(pVCpu, NULL, rc);
1548 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
1549 if ( VM_FF_ISPENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
1550 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
1551 rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);
1552
1553#ifdef VBOX_STRICT
1554 /*
1555 * Assert TSS consistency & rc vs patch code.
1556 */
1557 if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT) /* GDT implies TSS at the moment. */
1558 && EMIsRawRing0Enabled(pVM))
1559 SELMR3CheckTSS(pVM);
1560 switch (rc)
1561 {
1562 case VINF_SUCCESS:
1563 case VINF_EM_RAW_INTERRUPT:
1564 case VINF_PATM_PATCH_TRAP_PF:
1565 case VINF_PATM_PATCH_TRAP_GP:
1566 case VINF_PATM_PATCH_INT3:
1567 case VINF_PATM_CHECK_PATCH_PAGE:
1568 case VINF_EM_RAW_EXCEPTION_PRIVILEGED:
1569 case VINF_EM_RAW_GUEST_TRAP:
1570 case VINF_EM_RESCHEDULE_RAW:
1571 break;
1572
1573 default:
1574 if (PATMIsPatchGCAddr(pVM, pCtx->eip) && !(pCtx->eflags.u32 & X86_EFL_TF))
1575 LogIt(NULL, 0, LOG_GROUP_PATM, ("Patch code interrupted at %RRv for reason %Rrc\n", (RTRCPTR)CPUMGetGuestEIP(pVCpu), rc));
1576 break;
1577 }
1578 /*
1579 * Let's go paranoid!
1580 */
1581 if ( !VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL)
1582 && PGMMapHasConflicts(pVM))
1583 {
1584 PGMMapCheck(pVM);
1585 AssertMsgFailed(("We should not get conflicts any longer!!! rc=%Rrc\n", rc));
1586 return VERR_INTERNAL_ERROR;
1587 }
1588#endif /* VBOX_STRICT */
1589
1590 /*
1591 * Process the returned status code.
1592 */
1593 if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
1594 {
1595 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
1596 break;
1597 }
1598 rc = emR3RawHandleRC(pVM, pVCpu, pCtx, rc);
1599 if (rc != VINF_SUCCESS)
1600 {
1601 rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
1602 if (rc != VINF_SUCCESS)
1603 {
1604 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
1605 break;
1606 }
1607 }
1608
1609 /*
1610 * Check and execute forced actions.
1611 */
1612#ifdef VBOX_HIGH_RES_TIMERS_HACK
1613 TMTimerPollVoid(pVM, pVCpu);
1614#endif
1615 STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTail, d);
1616 if ( VM_FF_ISPENDING(pVM, ~VM_FF_HIGH_PRIORITY_PRE_RAW_MASK | VM_FF_PGM_NO_MEMORY)
1617 || VMCPU_FF_ISPENDING(pVCpu, ~VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
1618 {
1619 Assert(pCtx->eflags.Bits.u1VM || (pCtx->ss & X86_SEL_RPL) != 1);
1620
1621 STAM_REL_PROFILE_ADV_SUSPEND(&pVCpu->em.s.StatRAWTotal, a);
1622 rc = emR3ForcedActions(pVM, pVCpu, rc);
1623 STAM_REL_PROFILE_ADV_RESUME(&pVCpu->em.s.StatRAWTotal, a);
1624 if ( rc != VINF_SUCCESS
1625 && rc != VINF_EM_RESCHEDULE_RAW)
1626 {
1627 rc = emR3RawUpdateForceFlag(pVM, pVCpu, pCtx, rc);
1628 if (rc != VINF_SUCCESS)
1629 {
1630 *pfFFDone = true;
1631 break;
1632 }
1633 }
1634 }
1635 }
1636
1637 /*
1638 * Return to outer loop.
1639 */
1640#if defined(LOG_ENABLED) && defined(DEBUG)
1641 RTLogFlush(NULL);
1642#endif
1643 STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatRAWTotal, a);
1644 return rc;
1645}
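/* Illustrative sketch (not part of this file): how the outer loop in
 * EMR3ExecuteVM() typically consumes the pfFFDone hint returned above.
 * Simplified; the real caller also checks which forced actions are pending:
 *
 *     bool fFFDone = false;
 *     rc = emR3RawExecute(pVM, pVCpu, &fFFDone);
 *     if (!fFFDone)            // inner loop exited without servicing FFs
 *         rc = emR3ForcedActions(pVM, pVCpu, rc);
 */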
1646