VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp@7476

Last change on this file since 7476 was 7471, checked in by vboxsync, 17 years ago

Rewrote VT-x & AMD-V mode changes. Requires the MP apis in our runtime to function properly. (only tested Windows)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id
File size: 75.5 KB
/* $Id: HWVMXR0.cpp 7471 2008-03-17 10:50:10Z vboxsync $ */
/** @file
 * HWACCM VMX - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 innotek GmbH
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/hwaccm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/pgm.h>
#include <VBox/pdm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include "HWVMXR0.h"


/* IO operation lookup arrays. */
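/* The index is the 'size of access' field from the VM-exit qualification for I/O
   instructions (Intel SDM): 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes. The value 2 is
   not a defined encoding, hence the zero entries. */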
static uint32_t aIOSize[4]  = {1, 2, 0, 4};
static uint32_t aIOOpAnd[4] = {0xff, 0xffff, 0, 0xffffffff};


static void VMXR0CheckError(PVM pVM, int rc)
{
    if (rc == VERR_VMX_GENERIC)
    {
        RTCCUINTREG instrError;

        VMXReadVMCS(VMX_VMCS_RO_VM_INSTR_ERROR, &instrError);
        pVM->hwaccm.s.vmx.ulLastInstrError = instrError;
    }
    pVM->hwaccm.s.lLastError = rc;
}

/**
 * Sets up and activates VT-x on the current CPU
 *
 * @returns VBox status code.
 * @param   idCpu           The identifier for the CPU the function is called on.
 * @param   pVM             The VM to operate on.
 * @param   pvPageCpu       Pointer to the global cpu page
 * @param   pPageCpuPhys    Physical address of the global cpu page
 */
HWACCMR0DECL(int) VMXR0EnableCpu(RTCPUID idCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
{
    AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);

    /* Setup Intel VMX. */
    Assert(pVM->hwaccm.s.vmx.fSupported);

    /* Set revision dword at the beginning of the VMXON structure. */
    *(uint32_t *)pvPageCpu = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);

    /** @todo we should unmap the two pages from the virtual address space in order to prevent accidental corruption.
     * (which can have very bad consequences!!!)
     */

    /* Make sure the VMX instructions don't cause #UD faults. */
    ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);

    /* Enter VMX Root Mode */
    int rc = VMXEnable(pPageCpuPhys);
    if (VBOX_FAILURE(rc))
    {
        VMXR0CheckError(pVM, rc);
        ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
        return VERR_VMX_VMXON_FAILED;
    }

    return VINF_SUCCESS;
}

/**
 * Deactivates VT-x on the current CPU
 *
 * @returns VBox status code.
 * @param   idCpu           The identifier for the CPU the function is called on.
 * @param   pvPageCpu       Pointer to the global cpu page
 * @param   pPageCpuPhys    Physical address of the global cpu page
 */
HWACCMR0DECL(int) VMXR0DisableCpu(RTCPUID idCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
{
    AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);

    /* Leave VMX Root Mode. */
    VMXDisable();

    /* And clear the X86_CR4_VMXE bit. */
    ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
    return VINF_SUCCESS;
}

/**
 * Sets up VT-x for the specified VM
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) VMXR0SetupVM(PVM pVM)
{
    int rc = VINF_SUCCESS;
    uint32_t val;

    if (pVM == NULL)
        return VERR_INVALID_PARAMETER;

    /* Set revision dword at the beginning of the VMCS structure. */
    *(uint32_t *)pVM->hwaccm.s.vmx.pVMCS = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);

    /* Clear VM Control Structure. */
    Log(("pVMCSPhys = %VHp\n", pVM->hwaccm.s.vmx.pVMCSPhys));
    rc = VMXClearVMCS(pVM->hwaccm.s.vmx.pVMCSPhys);
    if (VBOX_FAILURE(rc))
        goto vmx_end;

    /* Activate the VM Control Structure. */
    rc = VMXActivateVMCS(pVM->hwaccm.s.vmx.pVMCSPhys);
    if (VBOX_FAILURE(rc))
        goto vmx_end;

    /* VMX_VMCS_CTRL_PIN_EXEC_CONTROLS
     * Set required bits to one and zero according to the MSR capabilities.
     */
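    /* Layout convention for the IA32_VMX_*_CTLS capability MSRs (Intel SDM): the low
       dword holds the allowed-0 settings (bits that must be 1), the high dword the
       allowed-1 settings (bits that may be 1). So: seed val with the low dword, OR in
       the features we want, then mask the result with the high dword. */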
    val  = (pVM->hwaccm.s.vmx.msr.vmx_pin_ctls & 0xFFFFFFFF);
    /* External and non-maskable interrupts cause VM-exits. */
    val  = val | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT;
    val &= (pVM->hwaccm.s.vmx.msr.vmx_pin_ctls >> 32ULL);

    rc = VMXWriteVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, val);
    AssertRC(rc);

    /* VMX_VMCS_CTRL_PROC_EXEC_CONTROLS
     * Set required bits to one and zero according to the MSR capabilities.
     */
    val  = (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls & 0xFFFFFFFF);
    /* Program which events cause VM-exits and which features we want to use. */
    val  = val | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT
               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET
               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT
               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT
               | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT;   /* don't execute mwait or else we'll idle inside the guest (host thinks the cpu load is high) */

    /** @note VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT might cause a vmlaunch failure with an invalid control fields error. (combined with some other exit reasons) */

    /*
     * if AMD64 guest mode
     *     val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT
     *          | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT;
     */
#if HC_ARCH_BITS == 64
    val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT
         | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT;
#endif
    /* Mask away the bits that the CPU doesn't support. */
    /** @todo make sure they don't conflict with the above requirements. */
    val &= (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls >> 32ULL);
    pVM->hwaccm.s.vmx.proc_ctls = val;

    rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, val);
    AssertRC(rc);

    /* VMX_VMCS_CTRL_CR3_TARGET_COUNT
     * Set required bits to one and zero according to the MSR capabilities.
     */
    rc = VMXWriteVMCS(VMX_VMCS_CTRL_CR3_TARGET_COUNT, 0);
    AssertRC(rc);

    /* VMX_VMCS_CTRL_ENTRY_CONTROLS
     * Set required bits to one and zero according to the MSR capabilities.
     */
    val = (pVM->hwaccm.s.vmx.msr.vmx_entry & 0xFFFFFFFF);
    if (pVM->hwaccm.s.cpuid.u32AMDFeatureEDX & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE)
    {
        /** @todo 32 bits guest mode only for now. */
        /* val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE; */
    }
    /* Mask away the bits that the CPU doesn't support. */
    /** @todo make sure they don't conflict with the above requirements. */
    val &= (pVM->hwaccm.s.vmx.msr.vmx_entry >> 32ULL);
    /* else Must be zero when AMD64 is not available. */
    rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, val);
    AssertRC(rc);

    /* VMX_VMCS_CTRL_EXIT_CONTROLS
     * Set required bits to one and zero according to the MSR capabilities.
     */
    val = (pVM->hwaccm.s.vmx.msr.vmx_exit & 0xFFFFFFFF);
#if HC_ARCH_BITS == 64
    val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64;
#else
    /* else Must be zero when AMD64 is not available. */
#endif
    val &= (pVM->hwaccm.s.vmx.msr.vmx_exit >> 32ULL);
    /* Don't acknowledge external interrupts on VM-exit. */
    rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, val);
    AssertRC(rc);

    /* Forward all exceptions except #NM & #PF to the guest.
     * We always need to check pagefaults since our shadow page table can be out of sync.
     * And we always lazily sync the FPU & XMM state.
     */

    /** @todo Possible optimization:
     * Keep the FPU and XMM state current in the EM thread. That way there's no need to
     * lazily sync anything, but the downside is that we can't use the FPU stack or XMM
     * registers ourselves of course.
     *
     * @note only possible if the current state is actually ours (X86_CR0_TS flag)
     */
    rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXCEPTION_BITMAP, HWACCM_VMX_TRAP_MASK);
    AssertRC(rc);

    /* Don't filter page faults; all of them should cause a switch. */
    rc  = VMXWriteVMCS(VMX_VMCS_CTRL_PAGEFAULT_ERROR_MASK, 0);
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_PAGEFAULT_ERROR_MATCH, 0);
    AssertRC(rc);

    /* Init TSC offset to zero. */
    rc = VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_FULL, 0);
#if HC_ARCH_BITS == 32
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_HIGH, 0);
#endif
    AssertRC(rc);

    rc = VMXWriteVMCS(VMX_VMCS_CTRL_IO_BITMAP_A_FULL, 0);
#if HC_ARCH_BITS == 32
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_IO_BITMAP_A_HIGH, 0);
#endif
    AssertRC(rc);

    rc = VMXWriteVMCS(VMX_VMCS_CTRL_IO_BITMAP_B_FULL, 0);
#if HC_ARCH_BITS == 32
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_IO_BITMAP_B_HIGH, 0);
#endif
    AssertRC(rc);

    /* Clear MSR controls. */
    if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
    {
        /* Optional */
        rc = VMXWriteVMCS(VMX_VMCS_CTRL_MSR_BITMAP_FULL, 0);
#if HC_ARCH_BITS == 32
        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_MSR_BITMAP_HIGH, 0);
#endif
        AssertRC(rc);
    }
    rc  = VMXWriteVMCS(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, 0);
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, 0);
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, 0);
#if HC_ARCH_BITS == 32
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_HIGH, 0);
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_HIGH, 0);
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_HIGH, 0);
#endif
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, 0);
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_LOAD_COUNT, 0);
    AssertRC(rc);

    if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
    {
        /* Optional */
        rc  = VMXWriteVMCS(VMX_VMCS_CTRL_TPR_TRESHOLD, 0);
        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, 0);
#if HC_ARCH_BITS == 32
        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VAPIC_PAGEADDR_HIGH, 0);
#endif
        AssertRC(rc);
    }

    /* Set link pointer to -1. Not currently used. */
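    /* Per the Intel SDM the VMCS link pointer has to read as all ones (~0) when it
       isn't used; any other value makes the VM-entry consistency checks fail. */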
#if HC_ARCH_BITS == 32
    rc  = VMXWriteVMCS(VMX_VMCS_GUEST_LINK_PTR_FULL, 0xFFFFFFFF);
    rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LINK_PTR_HIGH, 0xFFFFFFFF);
#else
    rc  = VMXWriteVMCS(VMX_VMCS_GUEST_LINK_PTR_FULL, 0xFFFFFFFFFFFFFFFFULL);
#endif
    AssertRC(rc);

    /* Clear VM Control Structure. Marking it inactive, clearing implementation specific data and writing back VMCS data to memory. */
    rc = VMXClearVMCS(pVM->hwaccm.s.vmx.pVMCSPhys);
    AssertRC(rc);

vmx_end:
    VMXR0CheckError(pVM, rc);
    return rc;
}


/**
 * Injects an event (trap or external interrupt)
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pCtx        CPU Context
 * @param   intInfo     VMX interrupt info
 * @param   cbInstr     Opcode length of faulting instruction
 * @param   errCode     Error code (optional)
 */
static int VMXR0InjectEvent(PVM pVM, CPUMCTX *pCtx, uint32_t intInfo, uint32_t cbInstr, uint32_t errCode)
{
    int rc;

#ifdef VBOX_STRICT
    uint32_t iGate = VMX_EXIT_INTERRUPTION_INFO_VECTOR(intInfo);
    if (iGate == 0xE)
        Log2(("VMXR0InjectEvent: Injecting interrupt %d at %VGv error code=%08x CR2=%08x intInfo=%08x\n", iGate, pCtx->eip, errCode, pCtx->cr2, intInfo));
    else
    if (iGate < 0x20)
        Log2(("VMXR0InjectEvent: Injecting interrupt %d at %VGv error code=%08x\n", iGate, pCtx->eip, errCode));
    else
    {
        Log2(("INJ-EI: %x at %VGv\n", iGate, pCtx->eip));
        Assert(!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS));
        Assert(pCtx->eflags.u32 & X86_EFL_IF);
    }
#endif

    /* Set event injection state. */
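    /* Layout of the VM-entry interruption-information field (Intel SDM): bits 7:0 hold
       the vector, bits 10:8 the event type, bit 11 the deliver-error-code flag and
       bit 31 the valid bit, which is what actually arms the injection below. */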
    rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_IRQ_INFO,
                      intInfo | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT)
                     );

    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_EXCEPTION_ERRCODE, errCode);

    AssertRC(rc);
    return rc;
}


/**
 * Checks for pending guest interrupts and injects them
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pCtx        CPU Context
 */
static int VMXR0CheckPendingInterrupt(PVM pVM, CPUMCTX *pCtx)
{
    int rc;

    /* Dispatch any pending interrupts. (injected before, but a VM exit occurred prematurely) */
    if (pVM->hwaccm.s.Event.fPending)
    {
        Log(("Reinjecting event %VX64 %08x at %VGv\n", pVM->hwaccm.s.Event.intInfo, pVM->hwaccm.s.Event.errCode, pCtx->eip));
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntReinject);
        rc = VMXR0InjectEvent(pVM, pCtx, pVM->hwaccm.s.Event.intInfo, 0, pVM->hwaccm.s.Event.errCode);
        AssertRC(rc);

        pVM->hwaccm.s.Event.fPending = false;
        return VINF_SUCCESS;
    }

    /* When external interrupts are pending, we should exit the VM when IF is set. */
    if (    !TRPMHasTrap(pVM)
        &&   VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
    {
        if (!(pCtx->eflags.u32 & X86_EFL_IF))
        {
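            /* The guest is blocking interrupts (IF=0), so we can't inject anything now.
               Arm interrupt-window exiting instead; the CPU will then exit as soon as
               the guest is able to accept interrupts again. */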
            Log2(("Enable irq window exit!\n"));
            pVM->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;
            rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVM->hwaccm.s.vmx.proc_ctls);
            AssertRC(rc);
        }
        else
        if (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
        {
            uint8_t u8Interrupt;

            rc = PDMGetInterrupt(pVM, &u8Interrupt);
            Log(("Dispatch interrupt: u8Interrupt=%x (%d) rc=%Vrc\n", u8Interrupt, u8Interrupt, rc));
            if (VBOX_SUCCESS(rc))
            {
                rc = TRPMAssertTrap(pVM, u8Interrupt, TRPM_HARDWARE_INT);
                AssertRC(rc);
            }
            else
            {
                /* Can only happen in rare cases where a pending interrupt is cleared behind our back */
                Assert(!VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)));
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatSwitchGuestIrq);
                /* Just continue */
            }
        }
        else
            Log(("Pending interrupt blocked at %VGv by VM_FF_INHIBIT_INTERRUPTS!!\n", pCtx->eip));
    }

#ifdef VBOX_STRICT
    if (TRPMHasTrap(pVM))
    {
        uint8_t u8Vector;
        rc = TRPMQueryTrapAll(pVM, &u8Vector, 0, 0, 0);
        AssertRC(rc);
    }
#endif

    if (    pCtx->eflags.u32 & X86_EFL_IF
        && (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
        &&  TRPMHasTrap(pVM)
       )
    {
        uint8_t     u8Vector;
        int         rc;
        TRPMEVENT   enmType;
        RTGCUINTPTR intInfo, errCode;

        /* If a new event is pending, then dispatch it now. */
        rc = TRPMQueryTrapAll(pVM, &u8Vector, &enmType, &errCode, 0);
        AssertRC(rc);
        Assert(pCtx->eflags.Bits.u1IF == 1 || enmType == TRPM_TRAP);
        Assert(enmType != TRPM_SOFTWARE_INT);

        /* Clear the pending trap. */
        rc = TRPMResetTrap(pVM);
        AssertRC(rc);

        intInfo  = u8Vector;
        intInfo |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);

        if (enmType == TRPM_TRAP)
        {
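            /* These are the exceptions that push an error code and therefore need the
               error-code-valid flag: #DF(8), #TS(10), #NP(11), #SS(12), #GP(13),
               #PF(14) and #AC(17). */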
            switch (u8Vector)
            {
            case 8:
            case 10:
            case 11:
            case 12:
            case 13:
            case 14:
            case 17:
                /* Valid error codes. */
                intInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
                break;
            default:
                break;
            }
            if (u8Vector == X86_XCPT_BP || u8Vector == X86_XCPT_OF)
                intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
            else
                intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HWEXCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
        }
        else
            intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);

        STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntInject);
        rc = VMXR0InjectEvent(pVM, pCtx, intInfo, 0, errCode);
        AssertRC(rc);
    } /* if (interrupts can be dispatched) */

    return VINF_SUCCESS;
}

/**
 * Save the host state
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) VMXR0SaveHostState(PVM pVM)
{
    int rc = VINF_SUCCESS;

    /*
     * Host CPU Context
     */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT)
    {
        RTIDTR      idtr;
        RTGDTR      gdtr;
        RTSEL       SelTR;
        PX86DESCHC  pDesc;
        uintptr_t   trBase;

        /* Control registers */
        rc  = VMXWriteVMCS(VMX_VMCS_HOST_CR0, ASMGetCR0());
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_CR3, ASMGetCR3());
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_CR4, ASMGetCR4());
        AssertRC(rc);
        Log2(("VMX_VMCS_HOST_CR0 %08x\n", ASMGetCR0()));
        Log2(("VMX_VMCS_HOST_CR3 %VHp\n", ASMGetCR3()));
        Log2(("VMX_VMCS_HOST_CR4 %08x\n", ASMGetCR4()));

        /* Selector registers. */
        rc  = VMXWriteVMCS(VMX_VMCS_HOST_FIELD_CS, ASMGetCS());
        /** @note VMX is (again) very picky about the RPL of the selectors here; we'll restore them manually. */
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_DS, 0);
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_ES, 0);
#if HC_ARCH_BITS == 32
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_FS, 0);
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_GS, 0);
#endif
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_SS, ASMGetSS());
        SelTR = ASMGetTR();
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_TR, SelTR);
        AssertRC(rc);
        Log2(("VMX_VMCS_HOST_FIELD_CS %08x\n", ASMGetCS()));
        Log2(("VMX_VMCS_HOST_FIELD_DS %08x\n", ASMGetDS()));
        Log2(("VMX_VMCS_HOST_FIELD_ES %08x\n", ASMGetES()));
        Log2(("VMX_VMCS_HOST_FIELD_FS %08x\n", ASMGetFS()));
        Log2(("VMX_VMCS_HOST_FIELD_GS %08x\n", ASMGetGS()));
        Log2(("VMX_VMCS_HOST_FIELD_SS %08x\n", ASMGetSS()));
        Log2(("VMX_VMCS_HOST_FIELD_TR %08x\n", ASMGetTR()));

        /* GDTR & IDTR */
        ASMGetGDTR(&gdtr);
        rc  = VMXWriteVMCS(VMX_VMCS_HOST_GDTR_BASE, gdtr.pGdt);
        ASMGetIDTR(&idtr);
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_IDTR_BASE, idtr.pIdt);
        AssertRC(rc);
        Log2(("VMX_VMCS_HOST_GDTR_BASE %VHv\n", gdtr.pGdt));
        Log2(("VMX_VMCS_HOST_IDTR_BASE %VHv\n", idtr.pIdt));

        /* Save the base address of the TR selector. */
        if (SelTR > gdtr.cbGdt)
        {
            AssertMsgFailed(("Invalid TR selector %x. GDTR.cbGdt=%x\n", SelTR, gdtr.cbGdt));
            return VERR_VMX_INVALID_HOST_STATE;
        }

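        /* The segment base is scattered over the descriptor: bits 15:0 in u16BaseLow,
           23:16 in u8BaseHigh1 and 31:24 in u8BaseHigh2; 64-bit hosts add bits 63:32
           from u32BaseHigh3, as system descriptors occupy two GDT slots there. */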
        pDesc  = &((PX86DESCHC)gdtr.pGdt)[SelTR >> X86_SEL_SHIFT_HC];
#if HC_ARCH_BITS == 64
        trBase = pDesc->Gen.u16BaseLow | (pDesc->Gen.u8BaseHigh1 << 16ULL) | (pDesc->Gen.u8BaseHigh2 << 24ULL) | ((uintptr_t)pDesc->Gen.u32BaseHigh3 << 32ULL);
#else
        trBase = pDesc->Gen.u16BaseLow | (pDesc->Gen.u8BaseHigh1 << 16) | (pDesc->Gen.u8BaseHigh2 << 24);
#endif
        rc = VMXWriteVMCS(VMX_VMCS_HOST_TR_BASE, trBase);
        AssertRC(rc);
        Log2(("VMX_VMCS_HOST_TR_BASE %VHv\n", trBase));

        /* FS and GS base. */
#if HC_ARCH_BITS == 64
        Log2(("MSR_K8_FS_BASE = %VHv\n", ASMRdMsr(MSR_K8_FS_BASE)));
        Log2(("MSR_K8_GS_BASE = %VHv\n", ASMRdMsr(MSR_K8_GS_BASE)));
        rc  = VMXWriteVMCS64(VMX_VMCS_HOST_FS_BASE, ASMRdMsr(MSR_K8_FS_BASE));
        rc |= VMXWriteVMCS64(VMX_VMCS_HOST_GS_BASE, ASMRdMsr(MSR_K8_GS_BASE));
#endif
        AssertRC(rc);

        /* Sysenter MSRs. */
        /** @todo expensive!! */
        rc  = VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
        Log2(("VMX_VMCS_HOST_SYSENTER_CS %08x\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_CS)));
#if HC_ARCH_BITS == 32
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
        Log2(("VMX_VMCS_HOST_SYSENTER_EIP %VHv\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP)));
        Log2(("VMX_VMCS_HOST_SYSENTER_ESP %VHv\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP)));
#else
        Log2(("VMX_VMCS_HOST_SYSENTER_EIP %VHv\n", ASMRdMsr(MSR_IA32_SYSENTER_EIP)));
        Log2(("VMX_VMCS_HOST_SYSENTER_ESP %VHv\n", ASMRdMsr(MSR_IA32_SYSENTER_ESP)));
        rc |= VMXWriteVMCS64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
        rc |= VMXWriteVMCS64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
#endif
        AssertRC(rc);

        pVM->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_HOST_CONTEXT;
    }
    return rc;
}


/**
 * Loads the guest state
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pCtx        Guest context
 */
HWACCMR0DECL(int) VMXR0LoadGuestState(PVM pVM, CPUMCTX *pCtx)
{
    int         rc = VINF_SUCCESS;
    RTGCUINTPTR val;
    X86EFLAGS   eflags;

    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
    {
        VMX_WRITE_SELREG(ES, es);
        AssertRC(rc);

        VMX_WRITE_SELREG(CS, cs);
        AssertRC(rc);

        VMX_WRITE_SELREG(SS, ss);
        AssertRC(rc);

        VMX_WRITE_SELREG(DS, ds);
        AssertRC(rc);

        VMX_WRITE_SELREG(FS, fs);
        AssertRC(rc);

        VMX_WRITE_SELREG(GS, gs);
        AssertRC(rc);
    }

    /* Guest CPU context: LDTR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
    {
        if (pCtx->ldtr == 0)
        {
            rc  = VMXWriteVMCS(VMX_VMCS_GUEST_FIELD_LDTR, 0);
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_LIMIT, 0);
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_BASE, 0);
            /** @note vmlaunch will fail with 0 or just 0x02. No idea why. */
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_ACCESS_RIGHTS, 0x82 /* present, LDT */);
        }
        else
        {
            rc  = VMXWriteVMCS(VMX_VMCS_GUEST_FIELD_LDTR, pCtx->ldtr);
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_LIMIT, pCtx->ldtrHid.u32Limit);
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtrHid.u32Base);
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_ACCESS_RIGHTS, pCtx->ldtrHid.Attr.u);
        }
        AssertRC(rc);
    }
    /* Guest CPU context: TR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
    {
        rc = VMXWriteVMCS(VMX_VMCS_GUEST_FIELD_TR, pCtx->tr);

        /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
        if (!(pCtx->cr0 & X86_CR0_PROTECTION_ENABLE))
        {
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_TR_LIMIT, sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS));
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_TR_BASE, 0);
        }
        else
        {
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_TR_LIMIT, pCtx->trHid.u32Limit);
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_TR_BASE, pCtx->trHid.u32Base);
        }
        val = pCtx->trHid.Attr.u;

        /* The TSS selector must be busy. */
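        /* VM-entry's guest-state checks insist on TR referencing a busy TSS, so promote
           an 'available' TSS type to the corresponding busy type (bit 1 of the type). */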
        if ((val & 0xF) == X86_SEL_TYPE_SYS_286_TSS_AVAIL)
            val = (val & ~0xF) | X86_SEL_TYPE_SYS_286_TSS_BUSY;
        else
            /* Default even if no TR selector has been set (otherwise vmlaunch will fail!) */
            val = (val & ~0xF) | X86_SEL_TYPE_SYS_386_TSS_BUSY;

        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_TR_ACCESS_RIGHTS, val);
        AssertRC(rc);
    }
    /* Guest CPU context: GDTR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
    {
        rc  = VMXWriteVMCS(VMX_VMCS_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt);
        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt);
        AssertRC(rc);
    }
    /* Guest CPU context: IDTR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
    {
        rc  = VMXWriteVMCS(VMX_VMCS_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt);
        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt);
        AssertRC(rc);
    }

    /*
     * Sysenter MSRs
     */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SYSENTER_MSR)
    {
        rc  = VMXWriteVMCS(VMX_VMCS_GUEST_SYSENTER_CS, pCtx->SysEnter.cs);
        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
        AssertRC(rc);
    }

    /* Control registers */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
    {
        val = pCtx->cr0;
        rc  = VMXWriteVMCS(VMX_VMCS_CTRL_CR0_READ_SHADOW, val);
        Log2(("Guest CR0-shadow %08x\n", val));
        if (CPUMIsGuestFPUStateActive(pVM) == false)
        {
            /* Always use #NM exceptions to load the FPU/XMM state on demand. */
            val |= X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
        }
        else
        {
            Assert(pVM->hwaccm.s.vmx.fResumeVM == true);
            /** @todo check if we support the old style mess correctly. */
            if (!(val & X86_CR0_NE))
            {
                Log(("Forcing X86_CR0_NE!!!\n"));

                /* Also catch floating point exceptions as we need to report them to the guest in a different way. */
                if (!pVM->hwaccm.s.fFPUOldStyleOverride)
                {
                    rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXCEPTION_BITMAP, HWACCM_VMX_TRAP_MASK | RT_BIT(16));
                    AssertRC(rc);
                    pVM->hwaccm.s.fFPUOldStyleOverride = true;
                }
            }

            val |= X86_CR0_NE;  /* always turn on the native mechanism to report FPU errors (old style uses interrupts) */
        }
        /* Note: protected mode & paging are always enabled; we use them for emulating real and protected mode without paging too. */
        val |= X86_CR0_PE | X86_CR0_PG;

        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_CR0, val);
        Log2(("Guest CR0 %08x\n", val));
        /* CR0 flags owned by the host; if the guest attempts to change them, then
         * the VM will exit.
         */
        val =   X86_CR0_PE  /* Must monitor this bit (assumptions are made for real mode emulation) */
              | X86_CR0_WP  /** @todo do we care? (we do if we start patching the guest) */
              | X86_CR0_PG  /* Must monitor this bit (assumptions are made for real mode & protected mode without paging emulation) */
              | X86_CR0_TS
              | X86_CR0_ET
              | X86_CR0_NE
              | X86_CR0_MP;
        pVM->hwaccm.s.vmx.cr0_mask = val;

        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_CR0_MASK, val);
        Log2(("Guest CR0-mask %08x\n", val));
        AssertRC(rc);
    }
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
    {
        /* CR4 */
        rc = VMXWriteVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW, pCtx->cr4);
        Log2(("Guest CR4-shadow %08x\n", pCtx->cr4));
        /* Set the required bits in cr4 too (currently X86_CR4_VMXE). */
        val = pCtx->cr4 | (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
        switch (pVM->hwaccm.s.enmShadowMode)
        {
        case PGMMODE_REAL:          /* Real mode                 -> emulated using v86 mode */
        case PGMMODE_PROTECTED:     /* Protected mode, no paging -> emulated using identity mapping. */
        case PGMMODE_32_BIT:        /* 32-bit paging. */
            break;

        case PGMMODE_PAE:           /* PAE paging. */
        case PGMMODE_PAE_NX:        /* PAE paging with NX enabled. */
            /** @todo use normal 32 bits paging */
            val |= X86_CR4_PAE;
            break;

        case PGMMODE_AMD64:         /* 64-bit AMD paging (long mode). */
        case PGMMODE_AMD64_NX:      /* 64-bit AMD paging (long mode) with NX enabled. */
            AssertFailed();
            return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;

        default:                    /* shut up gcc */
            AssertFailed();
            return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
        }
        /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
        if (!(pCtx->cr0 & X86_CR0_PROTECTION_ENABLE))
            val |= X86_CR4_VME;

        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_CR4, val);
        Log2(("Guest CR4 %08x\n", val));
        /* CR4 flags owned by the host; if the guest attempts to change them, then
         * the VM will exit.
         */
        val =   X86_CR4_PAE
              | X86_CR4_PGE
              | X86_CR4_PSE
              | X86_CR4_VMXE;
        pVM->hwaccm.s.vmx.cr4_mask = val;

        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_CR4_MASK, val);
        Log2(("Guest CR4-mask %08x\n", val));
        AssertRC(rc);
    }

    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
    {
        /* Save our shadow CR3 register. */
        val = PGMGetHyperCR3(pVM);
        rc = VMXWriteVMCS(VMX_VMCS_GUEST_CR3, val);
        AssertRC(rc);
    }

    /* Debug registers. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
    {
        /** @todo DR0-6 */
        val  = pCtx->dr7;
        val &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15));    /* must be zero */
        val |= 0x400;                                                   /* must be one */
#ifdef VBOX_STRICT
        val = 0x400;
#endif
        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_DR7, val);
        AssertRC(rc);

        /* IA32_DEBUGCTL MSR. */
        rc  = VMXWriteVMCS(VMX_VMCS_GUEST_DEBUGCTL_FULL, 0);
        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_DEBUGCTL_HIGH, 0);
        AssertRC(rc);

        /** @todo */
        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_DEBUG_EXCEPTIONS, 0);
        AssertRC(rc);
    }

    /* EIP, ESP and EFLAGS */
    rc  = VMXWriteVMCS(VMX_VMCS_GUEST_RIP, pCtx->eip);
    rc |= VMXWriteVMCS(VMX_VMCS_GUEST_RSP, pCtx->esp);
    AssertRC(rc);

    /* Bits 22-31, 15, 5 & 3 must be zero. Bit 1 must be 1. */
    eflags      = pCtx->eflags;
    eflags.u32 &= VMX_EFLAGS_RESERVED_0;
    eflags.u32 |= VMX_EFLAGS_RESERVED_1;

    /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
    if (!(pCtx->cr0 & X86_CR0_PROTECTION_ENABLE))
    {
        eflags.Bits.u1VM   = 1;
        eflags.Bits.u1VIF  = pCtx->eflags.Bits.u1IF;
        eflags.Bits.u2IOPL = 3;
    }

    rc = VMXWriteVMCS(VMX_VMCS_GUEST_RFLAGS, eflags.u32);
    AssertRC(rc);

    /* TSC offset. */
    uint64_t u64TSCOffset;

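    /* With TSC offsetting active, RDTSC in the guest returns the host TSC plus this
       offset; when a real TSC can't be used we intercept RDTSC instead (and as noted
       below, RDTSC exiting takes precedence over the offset). */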
    if (TMCpuTickCanUseRealTSC(pVM, &u64TSCOffset))
    {
        /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET */
#if HC_ARCH_BITS == 64
        rc = VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_FULL, u64TSCOffset);
#else
        rc  = VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_FULL, (uint32_t)u64TSCOffset);
        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_HIGH, (uint32_t)(u64TSCOffset >> 32ULL));
#endif
        AssertRC(rc);

        pVM->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
        rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVM->hwaccm.s.vmx.proc_ctls);
        AssertRC(rc);
    }
    else
    {
        pVM->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
        rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVM->hwaccm.s.vmx.proc_ctls);
        AssertRC(rc);
    }

    /* Done. */
    pVM->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;

    return rc;
}

/**
 * Runs guest code in a VMX VM.
 *
 * @note NEVER EVER turn on interrupts here. Due to our illegal entry into the kernel, it might mess things up. (XP kernel traps have been frequently observed)
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pCtx        Guest context
 */
HWACCMR0DECL(int) VMXR0RunGuestCode(PVM pVM, CPUMCTX *pCtx)
{
    int         rc = VINF_SUCCESS;
    RTCCUINTREG val, valShadow;
    RTCCUINTREG exitReason, instrError, cbInstr;
    RTGCUINTPTR exitQualification;
    RTGCUINTPTR intInfo = 0; /* shut up buggy gcc 4 */
    RTGCUINTPTR errCode, instrInfo, uInterruptState;
    bool        fGuestStateSynced = false;
    unsigned    cResume = 0;

    Log2(("\nE"));

    STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatEntry, x);

#ifdef VBOX_STRICT
    rc = VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, &val);
    AssertRC(rc);
    Log2(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS = %08x\n", val));

    /* allowed zero */
    if ((val & (pVM->hwaccm.s.vmx.msr.vmx_pin_ctls & 0xFFFFFFFF)) != (pVM->hwaccm.s.vmx.msr.vmx_pin_ctls & 0xFFFFFFFF))
    {
        Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: zero\n"));
    }
    /* allowed one */
    if ((val & ~(pVM->hwaccm.s.vmx.msr.vmx_pin_ctls >> 32ULL)) != 0)
    {
        Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: one\n"));
    }

    rc = VMXReadVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, &val);
    AssertRC(rc);
    Log2(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS = %08x\n", val));

    /* allowed zero */
    if ((val & (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls & 0xFFFFFFFF)) != (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls & 0xFFFFFFFF))
    {
        Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: zero\n"));
    }
    /* allowed one */
    if ((val & ~(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls >> 32ULL)) != 0)
    {
        Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: one\n"));
    }

    rc = VMXReadVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, &val);
    AssertRC(rc);
    Log2(("VMX_VMCS_CTRL_ENTRY_CONTROLS = %08x\n", val));

    /* allowed zero */
    if ((val & (pVM->hwaccm.s.vmx.msr.vmx_entry & 0xFFFFFFFF)) != (pVM->hwaccm.s.vmx.msr.vmx_entry & 0xFFFFFFFF))
    {
        Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: zero\n"));
    }
    /* allowed one */
    if ((val & ~(pVM->hwaccm.s.vmx.msr.vmx_entry >> 32ULL)) != 0)
    {
        Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: one\n"));
    }

    rc = VMXReadVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, &val);
    AssertRC(rc);
    Log2(("VMX_VMCS_CTRL_EXIT_CONTROLS = %08x\n", val));

    /* allowed zero */
    if ((val & (pVM->hwaccm.s.vmx.msr.vmx_exit & 0xFFFFFFFF)) != (pVM->hwaccm.s.vmx.msr.vmx_exit & 0xFFFFFFFF))
    {
        Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: zero\n"));
    }
    /* allowed one */
    if ((val & ~(pVM->hwaccm.s.vmx.msr.vmx_exit >> 32ULL)) != 0)
    {
        Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: one\n"));
    }
#endif

#if 0
    /*
     * Check if debug registers are armed.
     */
    uint32_t u32DR7 = ASMGetDR7();
    if (u32DR7 & X86_DR7_ENABLED_MASK)
    {
        pVM->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
    }
    else
        pVM->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HOST;
#endif

    /* We can jump to this point to resume execution after determining that a VM-exit is innocent.
     */
ResumeExecution:
    /* Safety precaution; looping for too long here can have a very bad effect on the host */
    if (++cResume > HWACCM_MAX_RESUME_LOOPS)
    {
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitMaxResume);
        rc = VINF_EM_RAW_INTERRUPT;
        goto end;
    }

    /* Check for irq inhibition due to instruction fusing (sti, mov ss). */
    if (VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
    {
        Log(("VM_FF_INHIBIT_INTERRUPTS at %VGv successor %VGv\n", pCtx->eip, EMGetInhibitInterruptsPC(pVM)));
        if (pCtx->eip != EMGetInhibitInterruptsPC(pVM))
        {
            /** @note we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here.
             * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
             * force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
             * break the guest. Sounds very unlikely, but such timing-sensitive problems are not as rare as you might think.
             */
            VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
            /* Irq inhibition is no longer active; clear the corresponding VMX state. */
            rc = VMXWriteVMCS(VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE, 0);
            AssertRC(rc);
        }
    }
    else
    {
        /* Irq inhibition is no longer active; clear the corresponding VMX state. */
        rc = VMXWriteVMCS(VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE, 0);
        AssertRC(rc);
    }

    /* Check for pending actions that force us to go back to ring 3. */
    if (VM_FF_ISPENDING(pVM, VM_FF_TO_R3 | VM_FF_TIMER))
    {
        VM_FF_CLEAR(pVM, VM_FF_TO_R3);
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatSwitchToR3);
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        rc = VINF_EM_RAW_TO_R3;
        goto end;
    }
    /* Pending request packets might contain actions that need immediate attention, such as pending hardware interrupts. */
    if (VM_FF_ISPENDING(pVM, VM_FF_REQUEST))
    {
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        rc = VINF_EM_PENDING_REQUEST;
        goto end;
    }

    /* When external interrupts are pending, we should exit the VM when IF is set. */
    /** @note *after* VM_FF_INHIBIT_INTERRUPTS check!!! */
    rc = VMXR0CheckPendingInterrupt(pVM, pCtx);
    if (VBOX_FAILURE(rc))
    {
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        goto end;
    }

    /** @todo check timers?? */

    /* Save the host state first. */
    rc = VMXR0SaveHostState(pVM);
    if (rc != VINF_SUCCESS)
    {
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        goto end;
    }
    /* Load the guest state */
    rc = VMXR0LoadGuestState(pVM, pCtx);
    if (rc != VINF_SUCCESS)
    {
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        goto end;
    }
    fGuestStateSynced = true;

    /* Non-register state Guest Context */
    /** @todo change me according to cpu state */
    rc = VMXWriteVMCS(VMX_VMCS_GUEST_ACTIVITY_STATE, VMX_CMS_GUEST_ACTIVITY_ACTIVE);
    AssertRC(rc);

    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);

    /* Manual save and restore:
     * - General purpose registers except RIP, RSP
     *
     * Trashed:
     * - CR2 (we don't care)
     * - LDTR (reset to 0)
     * - DRx (presumably not changed at all)
     * - DR7 (reset to 0x400)
     * - EFLAGS (reset to RT_BIT(1); not relevant)
     *
     */

    /* All done! Let's start VM execution. */
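    /* The first entry after VMXClearVMCS has to be made with VMLAUNCH; once the VMCS
       has been launched, further entries must use VMRESUME (Intel SDM). That is what
       fResumeVM keeps track of. */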
    STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatInGC, x);
    if (pVM->hwaccm.s.vmx.fResumeVM == false)
        rc = VMXStartVM(pCtx);
    else
        rc = VMXResumeVM(pCtx);

    /* In case we execute a goto ResumeExecution later on. */
    pVM->hwaccm.s.vmx.fResumeVM = true;

    /**
     * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     * IMPORTANT: WE CAN'T DO ANY LOGGING OR OPERATIONS THAT CAN DO A LONGJMP BACK TO RING 3 *BEFORE* WE'VE SYNCED BACK (MOST OF) THE GUEST STATE
     * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     */

    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatInGC, x);
    STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatExit, x);

    switch (rc)
    {
    case VINF_SUCCESS:
        break;

    case VERR_VMX_INVALID_VMXON_PTR:
        AssertFailed();
        goto end;

    case VERR_VMX_UNABLE_TO_START_VM:
    case VERR_VMX_UNABLE_TO_RESUME_VM:
    {
#ifdef VBOX_STRICT
        int rc1;

        rc1  = VMXReadVMCS(VMX_VMCS_RO_EXIT_REASON, &exitReason);
        rc1 |= VMXReadVMCS(VMX_VMCS_RO_VM_INSTR_ERROR, &instrError);
        AssertRC(rc1);
        if (rc1 == VINF_SUCCESS)
        {
            RTGDTR      gdtr;
            PX86DESCHC  pDesc;

            ASMGetGDTR(&gdtr);

            Log(("Unable to start/resume VM for reason: %x. Instruction error %x\n", (uint32_t)exitReason, (uint32_t)instrError));
            Log(("Current stack %08x\n", &rc1));

            VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
            Log(("Old eip %VGv new %VGv\n", pCtx->eip, (RTGCPTR)val));
            VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, &val);
            Log(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS %08x\n", val));
            VMXReadVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, &val);
            Log(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS %08x\n", val));
            VMXReadVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, &val);
            Log(("VMX_VMCS_CTRL_ENTRY_CONTROLS %08x\n", val));
            VMXReadVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, &val);
            Log(("VMX_VMCS_CTRL_EXIT_CONTROLS %08x\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_CR0, &val);
            Log(("VMX_VMCS_HOST_CR0 %08x\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_CR3, &val);
            Log(("VMX_VMCS_HOST_CR3 %VHp\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_CR4, &val);
            Log(("VMX_VMCS_HOST_CR4 %08x\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_FIELD_CS, &val);
            Log(("VMX_VMCS_HOST_FIELD_CS %08x\n", val));
            if (val < gdtr.cbGdt)
            {
                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
                HWACCMR0DumpDescriptor(pDesc, val, "CS: ");
            }

            VMXReadVMCS(VMX_VMCS_HOST_FIELD_DS, &val);
            Log(("VMX_VMCS_HOST_FIELD_DS %08x\n", val));
            if (val < gdtr.cbGdt)
            {
                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
                HWACCMR0DumpDescriptor(pDesc, val, "DS: ");
            }

            VMXReadVMCS(VMX_VMCS_HOST_FIELD_ES, &val);
            Log(("VMX_VMCS_HOST_FIELD_ES %08x\n", val));
            if (val < gdtr.cbGdt)
            {
                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
                HWACCMR0DumpDescriptor(pDesc, val, "ES: ");
            }

            VMXReadVMCS(VMX_VMCS_HOST_FIELD_FS, &val);
            Log(("VMX_VMCS_HOST_FIELD_FS %08x\n", val));
            if (val < gdtr.cbGdt)
            {
                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
                HWACCMR0DumpDescriptor(pDesc, val, "FS: ");
            }

            VMXReadVMCS(VMX_VMCS_HOST_FIELD_GS, &val);
            Log(("VMX_VMCS_HOST_FIELD_GS %08x\n", val));
            if (val < gdtr.cbGdt)
            {
                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
                HWACCMR0DumpDescriptor(pDesc, val, "GS: ");
            }

            VMXReadVMCS(VMX_VMCS_HOST_FIELD_SS, &val);
            Log(("VMX_VMCS_HOST_FIELD_SS %08x\n", val));
            if (val < gdtr.cbGdt)
            {
                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
                HWACCMR0DumpDescriptor(pDesc, val, "SS: ");
            }

            VMXReadVMCS(VMX_VMCS_HOST_FIELD_TR, &val);
            Log(("VMX_VMCS_HOST_FIELD_TR %08x\n", val));
            if (val < gdtr.cbGdt)
            {
                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
                HWACCMR0DumpDescriptor(pDesc, val, "TR: ");
            }

            VMXReadVMCS(VMX_VMCS_HOST_TR_BASE, &val);
            Log(("VMX_VMCS_HOST_TR_BASE %VHv\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_GDTR_BASE, &val);
            Log(("VMX_VMCS_HOST_GDTR_BASE %VHv\n", val));
            VMXReadVMCS(VMX_VMCS_HOST_IDTR_BASE, &val);
            Log(("VMX_VMCS_HOST_IDTR_BASE %VHv\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_CS, &val);
            Log(("VMX_VMCS_HOST_SYSENTER_CS %08x\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_EIP, &val);
            Log(("VMX_VMCS_HOST_SYSENTER_EIP %VHv\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_ESP, &val);
            Log(("VMX_VMCS_HOST_SYSENTER_ESP %VHv\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_RSP, &val);
            Log(("VMX_VMCS_HOST_RSP %VHv\n", val));
            VMXReadVMCS(VMX_VMCS_HOST_RIP, &val);
            Log(("VMX_VMCS_HOST_RIP %VHv\n", val));

#if HC_ARCH_BITS == 64
            Log(("MSR_K6_EFER    = %VX64\n", ASMRdMsr(MSR_K6_EFER)));
            Log(("MSR_K6_STAR    = %VX64\n", ASMRdMsr(MSR_K6_STAR)));
            Log(("MSR_K8_LSTAR   = %VX64\n", ASMRdMsr(MSR_K8_LSTAR)));
            Log(("MSR_K8_CSTAR   = %VX64\n", ASMRdMsr(MSR_K8_CSTAR)));
            Log(("MSR_K8_SF_MASK = %VX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
#endif
        }
#endif /* VBOX_STRICT */
        goto end;
    }

    default:
        /* impossible */
        AssertFailed();
        goto end;
    }
    /* Success. Query the guest state and figure out what has happened. */

    /* Investigate why there was a VM-exit. */
    rc = VMXReadVMCS(VMX_VMCS_RO_EXIT_REASON, &exitReason);
    STAM_COUNTER_INC(&pVM->hwaccm.s.pStatExitReasonR0[exitReason & MASK_EXITREASON_STAT]);

    exitReason &= 0xffff;   /* bits 0-15 contain the exit code. */
    rc |= VMXReadVMCS(VMX_VMCS_RO_VM_INSTR_ERROR, &instrError);
    rc |= VMXReadVMCS(VMX_VMCS_RO_EXIT_INSTR_LENGTH, &cbInstr);
    rc |= VMXReadVMCS(VMX_VMCS_RO_EXIT_INTERRUPTION_INFO, &val);
    intInfo = val;
    rc |= VMXReadVMCS(VMX_VMCS_RO_EXIT_INTERRUPTION_ERRCODE, &val);
    errCode = val;  /* might not be valid; depends on VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID. */
    rc |= VMXReadVMCS(VMX_VMCS_RO_EXIT_INSTR_INFO, &val);
    instrInfo = val;
    rc |= VMXReadVMCS(VMX_VMCS_RO_EXIT_QUALIFICATION, &val);
    exitQualification = val;
    AssertRC(rc);

    /* Take care of instruction fusing (sti, mov ss) */
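    /* In the guest interruptibility state, bit 0 means blocking by STI and bit 1
       blocking by MOV SS/POP SS; non-zero means one more instruction must run before
       interrupts can be delivered, so remember the PC it applies to. */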
    rc |= VMXReadVMCS(VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE, &val);
    uInterruptState = val;
    if (uInterruptState != 0)
    {
        Assert(uInterruptState <= 2);   /* only sti & mov ss */
        Log(("uInterruptState %x eip=%VGv\n", uInterruptState, pCtx->eip));
        EMSetInhibitInterruptsPC(pVM, pCtx->eip);
    }
    else
        VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);

    /* Let's first sync back eip, esp, and eflags. */
    rc = VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
    AssertRC(rc);
    pCtx->eip = val;
    rc = VMXReadVMCS(VMX_VMCS_GUEST_RSP, &val);
    AssertRC(rc);
    pCtx->esp = val;
    rc = VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS, &val);
    AssertRC(rc);
    pCtx->eflags.u32 = val;

    /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
    if (!(pCtx->cr0 & X86_CR0_PROTECTION_ENABLE))
    {
        /* Hide our emulation flags */
        pCtx->eflags.Bits.u1VM   = 0;
        pCtx->eflags.Bits.u1IF   = pCtx->eflags.Bits.u1VIF;
        pCtx->eflags.Bits.u1VIF  = 0;
        pCtx->eflags.Bits.u2IOPL = 0;
    }

    /* Control registers. */
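    /* For every CR0/CR4 bit we own (set in cr0_mask/cr4_mask) the guest saw only the
       read shadow, so rebuild the guest-visible value by combining the host-owned bits
       from the shadow with the remaining bits from the real guest register. */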
    VMXReadVMCS(VMX_VMCS_CTRL_CR0_READ_SHADOW, &valShadow);
    VMXReadVMCS(VMX_VMCS_GUEST_CR0, &val);
    val = (valShadow & pVM->hwaccm.s.vmx.cr0_mask) | (val & ~pVM->hwaccm.s.vmx.cr0_mask);
    CPUMSetGuestCR0(pVM, val);

    VMXReadVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW, &valShadow);
    VMXReadVMCS(VMX_VMCS_GUEST_CR4, &val);
    val = (valShadow & pVM->hwaccm.s.vmx.cr4_mask) | (val & ~pVM->hwaccm.s.vmx.cr4_mask);
    CPUMSetGuestCR4(pVM, val);

    CPUMSetGuestCR2(pVM, ASMGetCR2());

    VMXReadVMCS(VMX_VMCS_GUEST_DR7, &val);
    CPUMSetGuestDR7(pVM, val);

    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    VMX_READ_SELREG(ES, es);
    VMX_READ_SELREG(SS, ss);
    VMX_READ_SELREG(CS, cs);
    VMX_READ_SELREG(DS, ds);
    VMX_READ_SELREG(FS, fs);
    VMX_READ_SELREG(GS, gs);

    /** @note NOW IT'S SAFE FOR LOGGING! */
    Log2(("Raw exit reason %08x\n", exitReason));

    /* Check if an injected event was interrupted prematurely. */
    rc = VMXReadVMCS(VMX_VMCS_RO_IDT_INFO, &val);
    AssertRC(rc);
    pVM->hwaccm.s.Event.intInfo = VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(val);
    if (    VMX_EXIT_INTERRUPTION_INFO_VALID(pVM->hwaccm.s.Event.intInfo)
        &&  VMX_EXIT_INTERRUPTION_INFO_TYPE(pVM->hwaccm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SW)
    {
        Log(("Pending inject %VX64 at %08x exit=%08x intInfo=%08x exitQualification=%08x\n", pVM->hwaccm.s.Event.intInfo, pCtx->eip, exitReason, intInfo, exitQualification));
        pVM->hwaccm.s.Event.fPending = true;
        /* Error code present? */
        if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVM->hwaccm.s.Event.intInfo))
        {
            rc = VMXReadVMCS(VMX_VMCS_RO_IDT_ERRCODE, &val);
            AssertRC(rc);
            pVM->hwaccm.s.Event.errCode = val;
        }
        else
            pVM->hwaccm.s.Event.errCode = 0;
    }

#ifdef VBOX_STRICT
    if (exitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE)
        HWACCMDumpRegs(pCtx);
#endif

    Log2(("E%d", exitReason));
    Log2(("Exit reason %d, exitQualification %08x\n", exitReason, exitQualification));
    Log2(("instrInfo=%d instrError=%d instr length=%d\n", instrInfo, instrError, cbInstr));
    Log2(("Interruption error code %d\n", errCode));
    Log2(("IntInfo = %08x\n", intInfo));
    Log2(("New EIP=%VGv\n", pCtx->eip));

    /* Some cases don't need a complete resync of the guest CPU state; handle them here. */
    switch (exitReason)
    {
    case VMX_EXIT_EXCEPTION:            /* 0 Exception or non-maskable interrupt (NMI). */
    case VMX_EXIT_EXTERNAL_IRQ:         /* 1 External interrupt. */
    {
        uint32_t vector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(intInfo);

        if (!VMX_EXIT_INTERRUPTION_INFO_VALID(intInfo))
        {
            Assert(exitReason == VMX_EXIT_EXTERNAL_IRQ);
            /* External interrupt; leave to allow it to be dispatched again. */
            rc = VINF_EM_RAW_INTERRUPT;
            break;
        }
        switch (VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo))
        {
        case VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI:       /* Non-maskable interrupt. */
            /* External interrupt; leave to allow it to be dispatched again. */
            rc = VINF_EM_RAW_INTERRUPT;
            break;

        case VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT:       /* External hardware interrupt. */
            AssertFailed(); /* can't come here; fails the first check. */
            break;

        case VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT:   /* Software exception. (#BP or #OF) */
            Assert(vector == 3 || vector == 4);
            /* no break */
        case VMX_EXIT_INTERRUPTION_INFO_TYPE_HWEXCPT:   /* Hardware exception. */
            Log2(("Hardware/software interrupt %d\n", vector));
            switch (vector)
            {
            case X86_XCPT_NM:
            {
                uint32_t oldCR0;

                Log(("#NM fault at %VGv error code %x\n", pCtx->eip, errCode));

                /** @todo don't intercept #NM exceptions anymore when we've activated the guest FPU state. */
                oldCR0 = ASMGetCR0();
                /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
                rc = CPUMHandleLazyFPU(pVM);
                if (rc == VINF_SUCCESS)
                {
                    Assert(CPUMIsGuestFPUStateActive(pVM));

                    /* CPUMHandleLazyFPU could have changed CR0; restore it. */
                    ASMSetCR0(oldCR0);

                    STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowNM);

                    /* Continue execution. */
                    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                    pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;

                    goto ResumeExecution;
                }

                Log(("Forward #NM fault to the guest\n"));
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestNM);
                rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, 0);
                AssertRC(rc);
                STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                goto ResumeExecution;
            }

            case X86_XCPT_PF:   /* Page fault */
            {
                Log2(("Page fault at %VGv error code %x\n", exitQualification, errCode));
                /* Exit qualification contains the linear address of the page fault. */
                TRPMAssertTrap(pVM, X86_XCPT_PF, TRPM_TRAP);
                TRPMSetErrorCode(pVM, errCode);
                TRPMSetFaultAddress(pVM, exitQualification);

                /* Forward it to our trap handler first, in case our shadow pages are out of sync. */
                rc = PGMTrap0eHandler(pVM, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)exitQualification);
                Log2(("PGMTrap0eHandler %VGv returned %Vrc\n", pCtx->eip, rc));
                if (rc == VINF_SUCCESS)
                {   /* We've successfully synced our shadow pages, so let's just continue execution. */
                    Log2(("Shadow page fault at %VGv cr2=%VGv error code %x\n", pCtx->eip, exitQualification, errCode));
                    STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowPF);

                    TRPMResetTrap(pVM);

                    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                    goto ResumeExecution;
                }
                else
                if (rc == VINF_EM_RAW_GUEST_TRAP)
                {   /* A genuine pagefault.
                     * Forward the trap to the guest by injecting the exception and resuming execution.
                     */
                    Log2(("Forward page fault to the guest\n"));
                    STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestPF);
                    /* The error code might have been changed. */
                    errCode = TRPMGetErrorCode(pVM);

                    TRPMResetTrap(pVM);

                    /* Now we must update CR2. */
                    pCtx->cr2 = exitQualification;
                    rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
                    AssertRC(rc);

                    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                    goto ResumeExecution;
                }
#ifdef VBOX_STRICT
                if (rc != VINF_EM_RAW_EMULATE_INSTR)
                    Log2(("PGMTrap0eHandler failed with %d\n", rc));
#endif
                /* Need to go back to the recompiler to emulate the instruction. */
                TRPMResetTrap(pVM);
                break;
            }

            case X86_XCPT_MF:   /* Floating point exception. */
            {
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestMF);
                if (!(pCtx->cr0 & X86_CR0_NE))
                {
                    /* old style FPU error reporting needs some extra work. */
                    /** @todo don't fall back to the recompiler, but do it manually. */
                    rc = VINF_EM_RAW_EMULATE_INSTR;
                    break;
                }
                Log(("Trap %x at %VGv\n", vector, pCtx->eip));
                rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
                AssertRC(rc);

                STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                goto ResumeExecution;
            }

#ifdef VBOX_STRICT
            case X86_XCPT_GP:   /* General protection failure exception. */
            case X86_XCPT_UD:   /* Invalid opcode exception. */
            case X86_XCPT_DE:   /* Divide error exception. */
            case X86_XCPT_SS:   /* Stack segment exception. */
            case X86_XCPT_NP:   /* Segment not present exception. */
            {
                switch (vector)
                {
                case X86_XCPT_DE:
                    STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestDE);
                    break;
                case X86_XCPT_UD:
                    STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestUD);
                    break;
                case X86_XCPT_SS:
                    STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestSS);
                    break;
                case X86_XCPT_NP:
                    STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestNP);
                    break;
                case X86_XCPT_GP:
                    STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestGP);
                    break;
                }

                Log(("Trap %x at %VGv\n", vector, pCtx->eip));
                rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
                AssertRC(rc);

                STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
                goto ResumeExecution;
            }
#endif
            default:
                AssertMsgFailed(("Unexpected vm-exit caused by exception %x\n", vector));
                rc = VERR_EM_INTERNAL_ERROR;
                break;
            } /* switch (vector) */

            break;

        default:
            rc = VERR_EM_INTERNAL_ERROR;
            AssertFailed();
            break;
        }

        break;
    }

    case VMX_EXIT_IRQ_WINDOW:           /* 7 Interrupt window. */
        /* Clear VM-exit on IF=1 change. */
        Log2(("VMX_EXIT_IRQ_WINDOW %VGv\n", pCtx->eip));
        pVM->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;
        rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVM->hwaccm.s.vmx.proc_ctls);
        AssertRC(rc);
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIrqWindow);
        goto ResumeExecution;   /* we check for pending guest interrupts there */

    case VMX_EXIT_INVD:                 /* 13 Guest software attempted to execute INVD. */
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitInvd);
        /* Skip instruction and continue directly. */
        pCtx->eip += cbInstr;
        /* Continue execution. */
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
        goto ResumeExecution;

    case VMX_EXIT_CPUID:                /* 10 Guest software attempted to execute CPUID. */
    {
        Log2(("VMX: Cpuid %x\n", pCtx->eax));
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCpuid);
        rc = EMInterpretCpuId(pVM, CPUMCTX2CORE(pCtx));
        if (rc == VINF_SUCCESS)
        {
            /* Update EIP and continue execution. */
            Assert(cbInstr == 2);
            pCtx->eip += cbInstr;
            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
            goto ResumeExecution;
        }
        AssertMsgFailed(("EMU: cpuid failed with %Vrc\n", rc));
        rc = VINF_EM_RAW_EMULATE_INSTR;
        break;
    }

    case VMX_EXIT_RDTSC:                /* 16 Guest software attempted to execute RDTSC. */
    {
        Log2(("VMX: Rdtsc\n"));
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitRdtsc);
        rc = EMInterpretRdtsc(pVM, CPUMCTX2CORE(pCtx));
        if (rc == VINF_SUCCESS)
        {
            /* Update EIP and continue execution. */
            Assert(cbInstr == 2);
            pCtx->eip += cbInstr;
            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
            goto ResumeExecution;
        }
        AssertMsgFailed(("EMU: rdtsc failed with %Vrc\n", rc));
        rc = VINF_EM_RAW_EMULATE_INSTR;
        break;
    }

    case VMX_EXIT_INVPG:                /* 14 Guest software attempted to execute INVLPG. */
    {
        Log2(("VMX: invlpg\n"));
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitInvpg);
        rc = EMInterpretInvlpg(pVM, CPUMCTX2CORE(pCtx), exitQualification);
        if (rc == VINF_SUCCESS)
        {
            /* Update EIP and continue execution. */
            pCtx->eip += cbInstr;
            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
            goto ResumeExecution;
        }
        AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: invlpg %VGv failed with %Vrc\n", exitQualification, rc));
        break;
    }

    case VMX_EXIT_CRX_MOVE:             /* 28 Control-register accesses. */
    {
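        /* For control-register accesses the exit qualification encodes the CR number,
           the access type (mov to/from CR, CLTS or LMSW) and the general purpose
           register involved (Intel SDM); the VMX_EXIT_QUALIFICATION_CRX_* macros
           below pick these fields apart. */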
        switch (VMX_EXIT_QUALIFICATION_CRX_ACCESS(exitQualification))
        {
        case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE:
            Log2(("VMX: %VGv mov cr%d, x\n", pCtx->eip, VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)));
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxWrite);
            rc = EMInterpretCRxWrite(pVM, CPUMCTX2CORE(pCtx),
                                     VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification),
                                     VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification));

            switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification))
            {
            case 0:
                pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
                break;
            case 2:
                break;
            case 3:
                pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
                break;
            case 4:
                pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
                break;
            default:
                AssertFailed();
            }
            /* Check if a sync operation is pending. */
            if (    rc == VINF_SUCCESS /* don't bother if we are going to ring 3 anyway */
                &&  VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
            {
                rc = PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM), CPUMGetGuestCR4(pVM), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
                AssertRC(rc);
            }
            break;

        case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ:
            Log2(("VMX: mov x, crx\n"));
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxRead);
            rc = EMInterpretCRxRead(pVM, CPUMCTX2CORE(pCtx),
                                    VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification),
                                    VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification));
            break;

        case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS:
            Log2(("VMX: clts\n"));
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCLTS);
            rc = EMInterpretCLTS(pVM);
            pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
            break;

        case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW:
            Log2(("VMX: lmsw %x\n", VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification)));
            STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitLMSW);
            rc = EMInterpretLMSW(pVM, VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification));
            pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
            break;
        }

        /* Update EIP if no error occurred. */
        if (VBOX_SUCCESS(rc))
            pCtx->eip += cbInstr;

        if (rc == VINF_SUCCESS)
        {
            /* Only resume if successful. */
            STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
            goto ResumeExecution;
        }
        Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
        break;
    }

1679 case VMX_EXIT_DRX_MOVE: /* 29 Debug-register accesses. */
1680 {
1681 /** @todo clear VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT after the first time and restore drx registers afterwards */
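     /* The exit qualification encodes the access direction, the debug register and the
      * general purpose register involved. */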
1682 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
1683 {
1684 Log2(("VMX: mov drx%d, genreg%d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification), VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification)));
1685 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitDRxWrite);
1686 rc = EMInterpretDRxWrite(pVM, CPUMCTX2CORE(pCtx),
1687 VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification),
1688 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification));
1689 Log2(("DR7=%08x\n", pCtx->dr7));
1690 }
1691 else
1692 {
1693 Log2(("VMX: mov x, drx\n"));
1694 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitDRxRead);
1695 rc = EMInterpretDRxRead(pVM, CPUMCTX2CORE(pCtx),
1696 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification),
1697 VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification));
1698 }
1699 /* Update EIP if no error occurred. */
1700 if (VBOX_SUCCESS(rc))
1701 pCtx->eip += cbInstr;
1702
1703 if (rc == VINF_SUCCESS)
1704 {
1705 /* Only resume if successful. */
1706 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1707 goto ResumeExecution;
1708 }
1709 Assert(rc == VERR_EM_INTERPRETER);
1710 break;
1711 }
1712
1713 /** @note We'll get a #GP if the IO instruction isn't allowed (IOPL or TSS bitmap); no need to double check. */
1714 case VMX_EXIT_PORT_IO: /* 30 I/O instruction. */
1715 {
1716 uint32_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(exitQualification);
1717 uint32_t uPort;
1718 bool fIOWrite = (VMX_EXIT_QUALIFICATION_IO_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
1719
1720 /** @todo necessary to make the distinction? */
1721 if (VMX_EXIT_QUALIFICATION_IO_ENCODING(exitQualification) == VMX_EXIT_QUALIFICATION_IO_ENCODING_DX)
1722 {
1723 uPort = pCtx->edx & 0xffff;
1724 }
1725 else
1726 uPort = VMX_EXIT_QUALIFICATION_IO_PORT(exitQualification); /* Immediate encoding. */
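     /* (The exit qualification's port-number field appears to hold the port for the DX
      * encoding as well, which would make this distinction redundant; see the @todo above.) */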
1727
1728 /* Paranoia: width encoding 2 is invalid and anything above 3 would index past the lookup tables. */
1729 if (RT_UNLIKELY(uIOWidth == 2 || uIOWidth >= 4))
1730 {
1731 rc = fIOWrite ? VINF_IOM_HC_IOPORT_WRITE : VINF_IOM_HC_IOPORT_READ;
1732 break;
1733 }
1734
1735 uint32_t cbSize = aIOSize[uIOWidth];
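     /* The lookup tables map the hardware width encoding (0=byte, 1=word, 3=dword) to the
      * access size in bytes and a matching operand mask. */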
1736
1737 if (VMX_EXIT_QUALIFICATION_IO_STRING(exitQualification))
1738 {
1739 /* ins/outs */
1740 uint32_t prefix = 0;
1741 if (VMX_EXIT_QUALIFICATION_IO_REP(exitQualification))
1742 prefix |= PREFIX_REP;
1743
1744 if (fIOWrite)
1745 {
1746 Log2(("IOMInterpretOUTSEx %VGv %x size=%d\n", pCtx->eip, uPort, cbSize));
1747 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringWrite);
1748 rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), uPort, prefix, cbSize);
1749 }
1750 else
1751 {
1752 Log2(("IOMInterpretINSEx %VGv %x size=%d\n", pCtx->eip, uPort, cbSize));
1753 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringRead);
1754 rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), uPort, prefix, cbSize);
1755 }
1756 }
1757 else
1758 {
1759 /* normal in/out */
1760 uint32_t uAndVal = aIOOpAnd[uIOWidth];
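     /* The mask confines the access to AL/AX/EAX so byte and word I/O leave the upper register bits untouched. */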
1761
1762 Assert(!VMX_EXIT_QUALIFICATION_IO_REP(exitQualification));
1763
1764 if (fIOWrite)
1765 {
1766 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOWrite);
1767 rc = IOMIOPortWrite(pVM, uPort, pCtx->eax & uAndVal, cbSize);
1768 }
1769 else
1770 {
1771 uint32_t u32Val = 0;
1772
1773 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIORead);
1774 rc = IOMIOPortRead(pVM, uPort, &u32Val, cbSize);
1775 if (IOM_SUCCESS(rc))
1776 {
1777 /* Write back to the EAX register. */
1778 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
1779 }
1780 }
1781 }
1782 /*
1783 * Handle the I/O return codes.
1784 * (The unhandled cases end up with rc == VINF_EM_RAW_EMULATE_INSTR.)
1785 */
1786 if (IOM_SUCCESS(rc))
1787 {
1788 /* Update EIP and continue execution. */
1789 pCtx->eip += cbInstr;
1790 if (RT_LIKELY(rc == VINF_SUCCESS))
1791 {
1792 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1793 goto ResumeExecution;
1794 }
1795 break;
1796 }
1797
1798#ifdef VBOX_STRICT
1799 if (rc == VINF_IOM_HC_IOPORT_READ)
1800 Assert(!fIOWrite);
1801 else if (rc == VINF_IOM_HC_IOPORT_WRITE)
1802 Assert(fIOWrite);
1803 else
1804 AssertMsg(VBOX_FAILURE(rc) || rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Vrc\n", rc));
1805#endif
1806 break;
1807 }
1808
1809 default:
1810 /* The rest is handled after syncing the entire CPU state. */
1811 break;
1812 }
1813
1814 /* Note: the guest state isn't entirely synced back at this stage. */
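     /* The switch above covered the exits we can service on the fast path with minimal state;
      * the second pass below mostly classifies whatever is left into a return code for the
      * ring 3 caller. */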
1815
1816 /* Investigate why there was a VM-exit. (part 2) */
1817 switch (exitReason)
1818 {
1819 case VMX_EXIT_EXCEPTION: /* 0 Exception or non-maskable interrupt (NMI). */
1820 case VMX_EXIT_EXTERNAL_IRQ: /* 1 External interrupt. */
1821 /* Already handled above. */
1822 break;
1823
1824 case VMX_EXIT_TRIPLE_FAULT: /* 2 Triple fault. */
1825 rc = VINF_EM_RESET; /* Triple fault equals a reset. */
1826 break;
1827
1828 case VMX_EXIT_INIT_SIGNAL: /* 3 INIT signal. */
1829 case VMX_EXIT_SIPI: /* 4 Start-up IPI (SIPI). */
1830 rc = VINF_EM_RAW_INTERRUPT;
1831 AssertFailed(); /* Can't happen. Yet. */
1832 break;
1833
1834 case VMX_EXIT_IO_SMI_IRQ: /* 5 I/O system-management interrupt (SMI). */
1835 case VMX_EXIT_SMI_IRQ: /* 6 Other SMI. */
1836 rc = VINF_EM_RAW_INTERRUPT;
1837 AssertFailed(); /* Can't happen afaik. */
1838 break;
1839
1840 case VMX_EXIT_TASK_SWITCH: /* 9 Task switch. */
1841 rc = VERR_EM_INTERPRETER;
1842 break;
1843
1844 case VMX_EXIT_HLT: /* 12 Guest software attempted to execute HLT. */
1845 /* Check if external interrupts are pending; if so, skip the HLT and resume guest execution instead of switching back to ring 3. */
1846 if (VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
1847 {
1848 pCtx->eip++; /* skip hlt */
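     /* (HLT is a single-byte opcode, 0F4h.) */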
     STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1849 goto ResumeExecution;
1850 }
1851
1852 rc = VINF_EM_RAW_EMULATE_INSTR_HLT;
1853 break;
1854
1855 case VMX_EXIT_RSM: /* 17 Guest software attempted to execute RSM in SMM. */
1856 AssertFailed(); /* can't happen. */
1857 rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
1858 break;
1859
1860 case VMX_EXIT_VMCALL: /* 18 Guest software executed VMCALL. */
1861 case VMX_EXIT_VMCLEAR: /* 19 Guest software executed VMCLEAR. */
1862 case VMX_EXIT_VMLAUNCH: /* 20 Guest software executed VMLAUNCH. */
1863 case VMX_EXIT_VMPTRLD: /* 21 Guest software executed VMPTRLD. */
1864 case VMX_EXIT_VMPTRST: /* 22 Guest software executed VMPTRST. */
1865 case VMX_EXIT_VMREAD: /* 23 Guest software executed VMREAD. */
1866 case VMX_EXIT_VMRESUME: /* 24 Guest software executed VMRESUME. */
1867 case VMX_EXIT_VMWRITE: /* 25 Guest software executed VMWRITE. */
1868 case VMX_EXIT_VMXOFF: /* 26 Guest software executed VMXOFF. */
1869 case VMX_EXIT_VMXON: /* 27 Guest software executed VMXON. */
1870 /** @todo inject #UD immediately */
1871 rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
1872 break;
1873
1874 case VMX_EXIT_CPUID: /* 10 Guest software attempted to execute CPUID. */
1875 case VMX_EXIT_RDTSC: /* 16 Guest software attempted to execute RDTSC. */
1876 case VMX_EXIT_INVPG: /* 14 Guest software attempted to execute INVLPG. */
1877 case VMX_EXIT_CRX_MOVE: /* 28 Control-register accesses. */
1878 case VMX_EXIT_DRX_MOVE: /* 29 Debug-register accesses. */
1879 case VMX_EXIT_PORT_IO: /* 30 I/O instruction. */
1880 /* already handled above */
1881 AssertMsg( rc == VINF_PGM_CHANGE_MODE
1882 || rc == VINF_EM_RAW_INTERRUPT
1883 || rc == VERR_EM_INTERPRETER
1884 || rc == VINF_EM_RAW_EMULATE_INSTR
1885 || rc == VINF_PGM_SYNC_CR3
1886 || rc == VINF_IOM_HC_IOPORT_READ
1887 || rc == VINF_IOM_HC_IOPORT_WRITE
1888 || rc == VINF_EM_RAW_GUEST_TRAP
1889 || rc == VINF_TRPM_XCPT_DISPATCHED
1890 || rc == VINF_EM_RESCHEDULE_REM,
1891 ("rc = %d\n", rc));
1892 break;
1893
1894 case VMX_EXIT_RDPMC: /* 15 Guest software attempted to execute RDPMC. */
1895 case VMX_EXIT_RDMSR: /* 31 RDMSR. Guest software attempted to execute RDMSR. */
1896 case VMX_EXIT_WRMSR: /* 32 WRMSR. Guest software attempted to execute WRMSR. */
1897 case VMX_EXIT_MWAIT: /* 36 Guest software executed MWAIT. */
1898 case VMX_EXIT_MONITOR: /* 39 Guest software attempted to execute MONITOR. */
1899 case VMX_EXIT_PAUSE: /* 40 Guest software attempted to execute PAUSE. */
1900 rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
1901 break;
1902
1903 case VMX_EXIT_IRQ_WINDOW: /* 7 Interrupt window. */
1904 Assert(rc == VINF_EM_RAW_INTERRUPT);
1905 break;
1906
1907 case VMX_EXIT_TPR: /* 43 TPR below threshold. Guest software executed MOV to CR8. */
1908 case VMX_EXIT_ERR_INVALID_GUEST_STATE: /* 33 VM-entry failure due to invalid guest state. */
1909 case VMX_EXIT_ERR_MSR_LOAD: /* 34 VM-entry failure due to MSR loading. */
1910 case VMX_EXIT_ERR_MACHINE_CHECK: /* 41 VM-entry failure due to machine-check. */
1911 default:
1912 rc = VERR_EM_INTERNAL_ERROR;
1913 AssertMsgFailed(("Unexpected exit code %d\n", exitReason)); /* Can't happen. */
1914 break;
1915
1916 }
1917end:
1918 if (fGuestStateSynced)
1919 {
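     /* Only read these back when the rest of the guest state was synced for this exit;
      * the fast paths above never need them. */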
1920 /* Remaining guest CPU context: TR, IDTR, GDTR, LDTR. */
1921 VMX_READ_SELREG(LDTR, ldtr);
1922 VMX_READ_SELREG(TR, tr);
1923
1924 VMXReadVMCS(VMX_VMCS_GUEST_GDTR_LIMIT, &val);
1925 pCtx->gdtr.cbGdt = val;
1926 VMXReadVMCS(VMX_VMCS_GUEST_GDTR_BASE, &val);
1927 pCtx->gdtr.pGdt = val;
1928
1929 VMXReadVMCS(VMX_VMCS_GUEST_IDTR_LIMIT, &val);
1930 pCtx->idtr.cbIdt = val;
1931 VMXReadVMCS(VMX_VMCS_GUEST_IDTR_BASE, &val);
1932 pCtx->idtr.pIdt = val;
1933
1934 /*
1935 * System MSRs
1936 */
1937 VMXReadVMCS(VMX_VMCS_GUEST_SYSENTER_CS, &val);
1938 pCtx->SysEnter.cs = val;
1939 VMXReadVMCS(VMX_VMCS_GUEST_SYSENTER_EIP, &val);
1940 pCtx->SysEnter.eip = val;
1941 VMXReadVMCS(VMX_VMCS_GUEST_SYSENTER_ESP, &val);
1942 pCtx->SysEnter.esp = val;
1943 }
1944
1945 /* Signal changes for the recompiler. */
1946 CPUMSetChangedFlags(pVM, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS);
1947
1948 /* If we executed vmlaunch/vmresume and an external irq was pending, then we don't have to do a full sync the next time. */
1949 if ( exitReason == VMX_EXIT_EXTERNAL_IRQ
1950 && !VMX_EXIT_INTERRUPTION_INFO_VALID(intInfo))
1951 {
1952 STAM_COUNTER_INC(&pVM->hwaccm.s.StatPendingHostIrq);
1953 /* On the next entry we'll only sync the host context. */
1954 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
1955 }
1956 else
1957 {
1958 /* On the next entry we'll sync everything. */
1959 /** @todo we can do better than this */
1960 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
1961 }
1962
1963 /* Translate into a less severe return code; ring 3 will retry the instruction with its full emulator. */
1964 if (rc == VERR_EM_INTERPRETER)
1965 rc = VINF_EM_RAW_EMULATE_INSTR;
1966
1967 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1968 Log2(("X"));
1969 return rc;
1970}
1971
1972
1973/**
1974 * Enters the VT-x session
1975 *
1976 * @returns VBox status code.
1977 * @param pVM The VM to operate on.
1978 */
1979HWACCMR0DECL(int) VMXR0Enter(PVM pVM)
1980{
1981 Assert(pVM->hwaccm.s.vmx.fSupported);
1982
1983 RTCCUINTREG cr4 = ASMGetCR4();
1984 if (!(cr4 & X86_CR4_VMXE))
1985 {
1986 AssertMsgFailed(("X86_CR4_VMXE should be set!\n"));
1987 return VERR_VMX_X86_CR4_VMXE_CLEARED;
1988 }
1989
1990 /* Activate the VM Control Structure. */
1991 int rc = VMXActivateVMCS(pVM->hwaccm.s.vmx.pVMCSPhys);
1992 if (VBOX_FAILURE(rc))
1993 return rc;
1994
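     /* Force VMLAUNCH on the next world switch; VMRESUME is only valid for a VMCS in the
      * launched state, and VMXR0Leave clears it. */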
1995 pVM->hwaccm.s.vmx.fResumeVM = false;
1996 return VINF_SUCCESS;
1997}
1998
1999
2000/**
2001 * Leaves the VT-x session
2002 *
2003 * @returns VBox status code.
2004 * @param pVM The VM to operate on.
2005 */
2006HWACCMR0DECL(int) VMXR0Leave(PVM pVM)
2007{
2008 Assert(pVM->hwaccm.s.vmx.fSupported);
2009
2010 /* Clear the VM Control Structure: mark it inactive, clear implementation-specific data and write the VMCS data back to memory. */
2011 int rc = VMXClearVMCS(pVM->hwaccm.s.vmx.pVMCSPhys);
2012 AssertRC(rc);
2013
2014 return VINF_SUCCESS;
2015}
2016