VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp @ 10661

Last change on this file since 10661 was 10661, checked in by vboxsync, 16 years ago:

Reduce the number of world switches caused by CR8 writes by checking whether we really need to be notified (only when an interrupt is pending and masked by the TPR value).
/* $Id: HWVMXR0.cpp 10661 2008-07-15 14:21:04Z vboxsync $ */
/** @file
 * HWACCM VMX - Host Context Ring 0.
 */

/*
 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
 * Clara, CA 95054 USA or visit http://www.sun.com if you need
 * additional information or have any questions.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HWACCM
#include <VBox/hwaccm.h>
#include "HWACCMInternal.h"
#include <VBox/vm.h>
#include <VBox/x86.h>
#include <VBox/pgm.h>
#include <VBox/pdm.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <VBox/selm.h>
#include <VBox/iom.h>
#include <iprt/param.h>
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/string.h>
#include "HWVMXR0.h"

/* I/O operation lookup arrays, indexed by the size field of the VM-exit
   qualification (0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes; index 2 is invalid). */
static uint32_t aIOSize[4]  = {1, 2, 0, 4};
static uint32_t aIOOpAnd[4] = {0xff, 0xffff, 0, 0xffffffff};


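/**
 * Records the last VMX failure; for VERR_VMX_GENERIC the VMCS instruction
 * error field holds the exact reason, so that is fetched and stored as well.
 */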
static void VMXR0CheckError(PVM pVM, int rc)
{
    if (rc == VERR_VMX_GENERIC)
    {
        RTCCUINTREG instrError;

        VMXReadVMCS(VMX_VMCS_RO_VM_INSTR_ERROR, &instrError);
        pVM->hwaccm.s.vmx.ulLastInstrError = instrError;
    }
    pVM->hwaccm.s.lLastError = rc;
}

/**
 * Sets up and activates VT-x on the current CPU
 *
 * @returns VBox status code.
 * @param   pCpu            CPU info struct
 * @param   pVM             The VM to operate on.
 * @param   pvPageCpu       Pointer to the global cpu page
 * @param   pPageCpuPhys    Physical address of the global cpu page
 */
HWACCMR0DECL(int) VMXR0EnableCpu(PHWACCM_CPUINFO pCpu, PVM pVM, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
{
    AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);

    /* Setup Intel VMX. */
    Assert(pVM->hwaccm.s.vmx.fSupported);

#ifdef LOG_ENABLED
    SUPR0Printf("VMXR0EnableCpu cpu %d page (%x) %x\n", pCpu->idCpu, pvPageCpu, (uint32_t)pPageCpuPhys);
#endif
    /* Set revision dword at the beginning of the VMXON structure. */
    *(uint32_t *)pvPageCpu = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);

    /** @todo we should unmap the two pages from the virtual address space in order to prevent accidental corruption.
     * (which can have very bad consequences!!!)
     */

    /* Make sure the VMX instructions don't cause #UD faults. */
    ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);

    /* Enter VMX Root Mode */
    int rc = VMXEnable(pPageCpuPhys);
    if (VBOX_FAILURE(rc))
    {
        VMXR0CheckError(pVM, rc);
        ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
        return VERR_VMX_VMXON_FAILED;
    }
    return VINF_SUCCESS;
}

/**
 * Deactivates VT-x on the current CPU
 *
 * @returns VBox status code.
 * @param   pCpu            CPU info struct
 * @param   pvPageCpu       Pointer to the global cpu page
 * @param   pPageCpuPhys    Physical address of the global cpu page
 */
HWACCMR0DECL(int) VMXR0DisableCpu(PHWACCM_CPUINFO pCpu, void *pvPageCpu, RTHCPHYS pPageCpuPhys)
{
    AssertReturn(pPageCpuPhys, VERR_INVALID_PARAMETER);
    AssertReturn(pvPageCpu, VERR_INVALID_PARAMETER);

    /* Leave VMX Root Mode. */
    VMXDisable();

    /* And clear the X86_CR4_VMXE bit */
    ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);

#ifdef LOG_ENABLED
    SUPR0Printf("VMXR0DisableCpu cpu %d\n", pCpu->idCpu);
#endif
    return VINF_SUCCESS;
}

/**
 * Does Ring-0 per VM VT-x init.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) VMXR0InitVM(PVM pVM)
{
    int rc;

#ifdef LOG_ENABLED
    SUPR0Printf("VMXR0InitVM %x\n", pVM);
#endif
    pVM->hwaccm.s.vmx.pMemObjVMCS = NIL_RTR0MEMOBJ;
    pVM->hwaccm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ;
    pVM->hwaccm.s.vmx.pMemObjRealModeTSS = NIL_RTR0MEMOBJ;


    /* Allocate one page for the VM control structure (VMCS). */
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjVMCS, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
    AssertRC(rc);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hwaccm.s.vmx.pVMCS     = RTR0MemObjAddress(pVM->hwaccm.s.vmx.pMemObjVMCS);
    pVM->hwaccm.s.vmx.pVMCSPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.vmx.pMemObjVMCS, 0);
    ASMMemZero32(pVM->hwaccm.s.vmx.pVMCS, PAGE_SIZE);

    /* Allocate one page for the TSS we need for real mode emulation. */
    rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjRealModeTSS, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
    AssertRC(rc);
    if (RT_FAILURE(rc))
        return rc;

    pVM->hwaccm.s.vmx.pRealModeTSS     = (PVBOXTSS)RTR0MemObjAddress(pVM->hwaccm.s.vmx.pMemObjRealModeTSS);
    pVM->hwaccm.s.vmx.pRealModeTSSPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.vmx.pMemObjRealModeTSS, 0);

    /* The I/O bitmap starts right after the virtual interrupt redirection bitmap. Outside the TSS on purpose; the CPU will not check it
     * for I/O operations. */
    ASMMemZero32(pVM->hwaccm.s.vmx.pRealModeTSS, PAGE_SIZE);
    pVM->hwaccm.s.vmx.pRealModeTSS->offIoBitmap = sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS);
    /* Bit set to 0 means redirection enabled. */
    memset(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap, 0x0, sizeof(pVM->hwaccm.s.vmx.pRealModeTSS->IntRedirBitmap));
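    /* With every bit clear, INT nn instructions executed in V86 mode are
       handled directly via the guest's real-mode IVT instead of faulting out
       to the virtual-8086 monitor (CR4.VME interrupt redirection). */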

    if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
    {
        /* Allocate one page for the virtual APIC mmio cache. */
        rc = RTR0MemObjAllocCont(&pVM->hwaccm.s.vmx.pMemObjAPIC, 1 << PAGE_SHIFT, true /* executable R0 mapping */);
        AssertRC(rc);
        if (RT_FAILURE(rc))
            return rc;

        pVM->hwaccm.s.vmx.pAPIC     = (uint8_t *)RTR0MemObjAddress(pVM->hwaccm.s.vmx.pMemObjAPIC);
        pVM->hwaccm.s.vmx.pAPICPhys = RTR0MemObjGetPagePhysAddr(pVM->hwaccm.s.vmx.pMemObjAPIC, 0);
        ASMMemZero32(pVM->hwaccm.s.vmx.pAPIC, PAGE_SIZE);
    }
    else
    {
        pVM->hwaccm.s.vmx.pMemObjAPIC = 0;
        pVM->hwaccm.s.vmx.pAPIC       = 0;
        pVM->hwaccm.s.vmx.pAPICPhys   = 0;
    }

#ifdef LOG_ENABLED
    SUPR0Printf("VMXR0InitVM %x VMCS=%x (%x) RealModeTSS=%x (%x)\n", pVM, pVM->hwaccm.s.vmx.pVMCS, (uint32_t)pVM->hwaccm.s.vmx.pVMCSPhys, pVM->hwaccm.s.vmx.pRealModeTSS, (uint32_t)pVM->hwaccm.s.vmx.pRealModeTSSPhys);
#endif
    return VINF_SUCCESS;
}

/**
 * Does Ring-0 per VM VT-x termination.
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) VMXR0TermVM(PVM pVM)
{
    if (pVM->hwaccm.s.vmx.pMemObjVMCS != NIL_RTR0MEMOBJ)
    {
        RTR0MemObjFree(pVM->hwaccm.s.vmx.pMemObjVMCS, false);
        pVM->hwaccm.s.vmx.pMemObjVMCS = NIL_RTR0MEMOBJ;
        pVM->hwaccm.s.vmx.pVMCS       = 0;
        pVM->hwaccm.s.vmx.pVMCSPhys   = 0;
    }
    if (pVM->hwaccm.s.vmx.pMemObjRealModeTSS != NIL_RTR0MEMOBJ)
    {
        RTR0MemObjFree(pVM->hwaccm.s.vmx.pMemObjRealModeTSS, false);
        pVM->hwaccm.s.vmx.pMemObjRealModeTSS = NIL_RTR0MEMOBJ;
        pVM->hwaccm.s.vmx.pRealModeTSS       = 0;
        pVM->hwaccm.s.vmx.pRealModeTSSPhys   = 0;
    }
    if (pVM->hwaccm.s.vmx.pMemObjAPIC != NIL_RTR0MEMOBJ)
    {
        RTR0MemObjFree(pVM->hwaccm.s.vmx.pMemObjAPIC, false);
        pVM->hwaccm.s.vmx.pMemObjAPIC = NIL_RTR0MEMOBJ;
        pVM->hwaccm.s.vmx.pAPIC       = 0;
        pVM->hwaccm.s.vmx.pAPICPhys   = 0;
    }
    return VINF_SUCCESS;
}

/**
 * Sets up VT-x for the specified VM
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) VMXR0SetupVM(PVM pVM)
{
    int rc = VINF_SUCCESS;
    uint32_t val;

    AssertReturn(pVM, VERR_INVALID_PARAMETER);
    Assert(pVM->hwaccm.s.vmx.pVMCS);

    /* Set revision dword at the beginning of the VMCS structure. */
    *(uint32_t *)pVM->hwaccm.s.vmx.pVMCS = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hwaccm.s.vmx.msr.vmx_basic_info);

    /* Clear VM Control Structure. */
    Log(("pVMCSPhys = %VHp\n", pVM->hwaccm.s.vmx.pVMCSPhys));
    rc = VMXClearVMCS(pVM->hwaccm.s.vmx.pVMCSPhys);
    if (VBOX_FAILURE(rc))
        goto vmx_end;

    /* Activate the VM Control Structure. */
    rc = VMXActivateVMCS(pVM->hwaccm.s.vmx.pVMCSPhys);
    if (VBOX_FAILURE(rc))
        goto vmx_end;

    /* VMX_VMCS_CTRL_PIN_EXEC_CONTROLS
     * Set required bits to one and zero according to the MSR capabilities.
     */
    val = pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
    /* External and non-maskable interrupts cause VM-exits. */
    val = val | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT;
    val &= pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.allowed1;

    rc = VMXWriteVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, val);
    AssertRC(rc);

    /* VMX_VMCS_CTRL_PROC_EXEC_CONTROLS
     * Set required bits to one and zero according to the MSR capabilities.
     */
    val = pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
    /* Program which events cause VM-exits and which features we want to use. */
    val = val | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT
              | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET
              | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
              | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT
              | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT
              | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT;   /* don't execute mwait or else we'll idle inside the guest (host thinks the cpu load is high) */

    /** @note VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT might cause a vmlaunch failure with an invalid control fields error. (combined with some other exit reasons) */

#if HC_ARCH_BITS == 64
    if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
    {
        /* CR8 reads from the APIC shadow page; writes cause an exit if they lower the TPR below the threshold */
        val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW;
        Assert(pVM->hwaccm.s.vmx.pAPIC);
    }
    else
        /* Exit on CR8 reads & writes in case the TPR shadow feature isn't present. */
        val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT;
#endif
    /* Mask away the bits that the CPU doesn't support */
    /** @todo make sure they don't conflict with the above requirements. */
    val &= pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
    pVM->hwaccm.s.vmx.proc_ctls = val;
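    /* Keep a cached copy; the IRQ-window and RDTSC intercepts are toggled at
       runtime by editing proc_ctls and rewriting the VMCS field. */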

    rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, val);
    AssertRC(rc);

    /* VMX_VMCS_CTRL_CR3_TARGET_COUNT
     * Set required bits to one and zero according to the MSR capabilities.
     */
    rc = VMXWriteVMCS(VMX_VMCS_CTRL_CR3_TARGET_COUNT, 0);
    AssertRC(rc);

    /* VMX_VMCS_CTRL_EXIT_CONTROLS
     * Set required bits to one and zero according to the MSR capabilities.
     */
    val = pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0;
#if HC_ARCH_BITS == 64
    val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_AMD64;
#else
    /* else Must be zero when AMD64 is not available. */
#endif
    val &= pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1;
    /* Don't acknowledge external interrupts on VM-exit. */
    rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, val);
    AssertRC(rc);
    /* Forward all exceptions except #NM & #PF to the guest.
     * We always need to check page faults since our shadow page table can be out of sync.
     * And we always lazily sync the FPU & XMM state.
     */

    /** @todo Possible optimization:
     * Keep the FPU and XMM state current in the EM thread. That way there's no need to
     * lazily sync anything, but the downside is that we can't use the FPU stack or XMM
     * registers ourselves of course.
     *
     * @note only possible if the current state is actually ours (X86_CR0_TS flag)
     */
    rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXCEPTION_BITMAP, HWACCM_VMX_TRAP_MASK);
    AssertRC(rc);

    /* Don't filter page faults; all of them should cause a switch. */
    rc  = VMXWriteVMCS(VMX_VMCS_CTRL_PAGEFAULT_ERROR_MASK, 0);
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_PAGEFAULT_ERROR_MATCH, 0);
    AssertRC(rc);

    /* Init TSC offset to zero. */
    rc = VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_FULL, 0);
#if HC_ARCH_BITS == 32
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_HIGH, 0);
#endif
    AssertRC(rc);

    rc = VMXWriteVMCS(VMX_VMCS_CTRL_IO_BITMAP_A_FULL, 0);
#if HC_ARCH_BITS == 32
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_IO_BITMAP_A_HIGH, 0);
#endif
    AssertRC(rc);

    rc = VMXWriteVMCS(VMX_VMCS_CTRL_IO_BITMAP_B_FULL, 0);
#if HC_ARCH_BITS == 32
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_IO_BITMAP_B_HIGH, 0);
#endif
    AssertRC(rc);

    /* Clear MSR controls. */
    if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
    {
        /* Optional */
        rc = VMXWriteVMCS(VMX_VMCS_CTRL_MSR_BITMAP_FULL, 0);
#if HC_ARCH_BITS == 32
        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_MSR_BITMAP_HIGH, 0);
#endif
        AssertRC(rc);
    }
    rc  = VMXWriteVMCS(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_FULL, 0);
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_FULL, 0);
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_FULL, 0);
#if HC_ARCH_BITS == 32
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMEXIT_MSR_STORE_HIGH, 0);
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMEXIT_MSR_LOAD_HIGH, 0);
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VMENTRY_MSR_LOAD_HIGH, 0);
#endif
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_STORE_COUNT, 0);
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_EXIT_MSR_LOAD_COUNT, 0);
    AssertRC(rc);

    if (pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
    {
        Assert(pVM->hwaccm.s.vmx.pMemObjAPIC);
        /* Optional */
        rc  = VMXWriteVMCS(VMX_VMCS_CTRL_TPR_THRESHOLD, 0);
        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VAPIC_PAGEADDR_FULL, pVM->hwaccm.s.vmx.pAPICPhys);
#if HC_ARCH_BITS == 32
        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_VAPIC_PAGEADDR_HIGH, pVM->hwaccm.s.vmx.pAPICPhys >> 32);
#endif
        AssertRC(rc);
    }
    /* Set link pointer to -1 (required for the VM entry to succeed). Not currently used. */
#if HC_ARCH_BITS == 32
    rc  = VMXWriteVMCS(VMX_VMCS_GUEST_LINK_PTR_FULL, 0xFFFFFFFF);
    rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LINK_PTR_HIGH, 0xFFFFFFFF);
#else
    rc = VMXWriteVMCS(VMX_VMCS_GUEST_LINK_PTR_FULL, 0xFFFFFFFFFFFFFFFF);
#endif
    AssertRC(rc);
    /* Clear the VM control structure: mark it inactive, clear implementation-specific data, and write the VMCS data back to memory. */
    rc = VMXClearVMCS(pVM->hwaccm.s.vmx.pVMCSPhys);
    AssertRC(rc);

vmx_end:
    VMXR0CheckError(pVM, rc);
    return rc;
}


/**
 * Injects an event (trap or external interrupt)
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pCtx        CPU Context
 * @param   intInfo     VMX interrupt info
 * @param   cbInstr     Opcode length of faulting instruction
 * @param   errCode     Error code (optional)
 */
static int VMXR0InjectEvent(PVM pVM, CPUMCTX *pCtx, uint32_t intInfo, uint32_t cbInstr, uint32_t errCode)
{
    int rc;

#ifdef VBOX_STRICT
    uint32_t iGate = VMX_EXIT_INTERRUPTION_INFO_VECTOR(intInfo);
    if (iGate == 0xE)
        Log2(("VMXR0InjectEvent: Injecting interrupt %d at %VGv error code=%08x CR2=%08x intInfo=%08x\n", iGate, pCtx->rip, errCode, pCtx->cr2, intInfo));
    else
    if (iGate < 0x20)
        Log2(("VMXR0InjectEvent: Injecting interrupt %d at %VGv error code=%08x\n", iGate, pCtx->rip, errCode));
    else
    {
        Log2(("INJ-EI: %x at %VGv\n", iGate, pCtx->rip));
        Assert(!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS));
        Assert(pCtx->eflags.u32 & X86_EFL_IF);
    }
#endif

    /* Set event injection state. */
    rc  = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_IRQ_INFO,
                       intInfo | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT)
                      );

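    /* The instruction length below is only consumed when injecting software
       interrupts or software exceptions; for hardware interrupts and
       exceptions the CPU ignores it. */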
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
    rc |= VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_EXCEPTION_ERRCODE, errCode);

    AssertRC(rc);
    return rc;
}


/**
 * Checks for pending guest interrupts and injects them
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pCtx        CPU Context
 */
static int VMXR0CheckPendingInterrupt(PVM pVM, CPUMCTX *pCtx)
{
    int rc;

    /* Dispatch any pending interrupts. (injected before, but a VM exit occurred prematurely) */
    if (pVM->hwaccm.s.Event.fPending)
    {
        Log(("Reinjecting event %VX64 %08x at %VGv cr2=%RX64\n", pVM->hwaccm.s.Event.intInfo, pVM->hwaccm.s.Event.errCode, pCtx->rip, pCtx->cr2));
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntReinject);
        rc = VMXR0InjectEvent(pVM, pCtx, pVM->hwaccm.s.Event.intInfo, 0, pVM->hwaccm.s.Event.errCode);
        AssertRC(rc);

        pVM->hwaccm.s.Event.fPending = false;
        return VINF_SUCCESS;
    }

    /* When external interrupts are pending, we should exit the VM when IF is set. */
    if (    !TRPMHasTrap(pVM)
        &&  VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
    {
        if (!(pCtx->eflags.u32 & X86_EFL_IF))
        {
            Log2(("Enable irq window exit!\n"));
            pVM->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;
            rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVM->hwaccm.s.vmx.proc_ctls);
            AssertRC(rc);
        }
        else
        if (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
        {
            uint8_t u8Interrupt;

            rc = PDMGetInterrupt(pVM, &u8Interrupt);
            Log(("Dispatch interrupt: u8Interrupt=%x (%d) rc=%Vrc\n", u8Interrupt, u8Interrupt, rc));
            if (VBOX_SUCCESS(rc))
            {
                rc = TRPMAssertTrap(pVM, u8Interrupt, TRPM_HARDWARE_INT);
                AssertRC(rc);
            }
            else
            {
                /* Can only happen in rare cases where a pending interrupt is cleared behind our back */
                Assert(!VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)));
                STAM_COUNTER_INC(&pVM->hwaccm.s.StatSwitchGuestIrq);
                /* Just continue */
            }
        }
        else
            Log(("Pending interrupt blocked at %VGv by VM_FF_INHIBIT_INTERRUPTS!!\n", pCtx->rip));
    }

#ifdef VBOX_STRICT
    if (TRPMHasTrap(pVM))
    {
        uint8_t u8Vector;
        rc = TRPMQueryTrapAll(pVM, &u8Vector, 0, 0, 0);
        AssertRC(rc);
    }
#endif

    if (    pCtx->eflags.u32 & X86_EFL_IF
        && (!VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
        && TRPMHasTrap(pVM)
       )
    {
        uint8_t     u8Vector;
        int         rc;
        TRPMEVENT   enmType;
        RTGCUINTPTR intInfo;
        RTGCUINT    errCode;

        /* If a new event is pending, then dispatch it now. */
        rc = TRPMQueryTrapAll(pVM, &u8Vector, &enmType, &errCode, 0);
        AssertRC(rc);
        Assert(pCtx->eflags.Bits.u1IF == 1 || enmType == TRPM_TRAP);
        Assert(enmType != TRPM_SOFTWARE_INT);

        /* Clear the pending trap. */
        rc = TRPMResetTrap(pVM);
        AssertRC(rc);

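        /* Convert the TRPM event into the VMX interruption-information format
           (vector, type and the valid bit). */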
        intInfo  = u8Vector;
        intInfo |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);

        if (enmType == TRPM_TRAP)
        {
            switch (u8Vector) {
            case 8:
            case 10:
            case 11:
            case 12:
            case 13:
            case 14:
            case 17:
                /* Valid error codes. */
                intInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
                break;
            default:
                break;
            }
            if (u8Vector == X86_XCPT_BP || u8Vector == X86_XCPT_OF)
                intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
            else
                intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HWEXCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
        }
        else
            intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);

        STAM_COUNTER_INC(&pVM->hwaccm.s.StatIntInject);
        rc = VMXR0InjectEvent(pVM, pCtx, intInfo, 0, errCode);
        AssertRC(rc);
    } /* if (interrupts can be dispatched) */

    return VINF_SUCCESS;
}

/**
 * Save the host state
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 */
HWACCMR0DECL(int) VMXR0SaveHostState(PVM pVM)
{
    int rc = VINF_SUCCESS;

    /*
     * Host CPU Context
     */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_HOST_CONTEXT)
    {
        RTIDTR      idtr;
        RTGDTR      gdtr;
        RTSEL       SelTR;
        PX86DESCHC  pDesc;
        uintptr_t   trBase;

        /* Control registers */
        rc  = VMXWriteVMCS(VMX_VMCS_HOST_CR0, ASMGetCR0());
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_CR3, ASMGetCR3());
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_CR4, ASMGetCR4());
        AssertRC(rc);
        Log2(("VMX_VMCS_HOST_CR0 %08x\n", ASMGetCR0()));
        Log2(("VMX_VMCS_HOST_CR3 %VHp\n", ASMGetCR3()));
        Log2(("VMX_VMCS_HOST_CR4 %08x\n", ASMGetCR4()));

        /* Selector registers. */
        rc  = VMXWriteVMCS(VMX_VMCS_HOST_FIELD_CS, ASMGetCS());
        /** @note VMX is (again) very picky about the RPL of the selectors here; we'll restore them manually. */
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_DS, 0);
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_ES, 0);
#if HC_ARCH_BITS == 32
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_FS, 0);
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_GS, 0);
#endif
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_SS, ASMGetSS());
        SelTR = ASMGetTR();
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_FIELD_TR, SelTR);
        AssertRC(rc);
        Log2(("VMX_VMCS_HOST_FIELD_CS %08x\n", ASMGetCS()));
        Log2(("VMX_VMCS_HOST_FIELD_DS %08x\n", ASMGetDS()));
        Log2(("VMX_VMCS_HOST_FIELD_ES %08x\n", ASMGetES()));
        Log2(("VMX_VMCS_HOST_FIELD_FS %08x\n", ASMGetFS()));
        Log2(("VMX_VMCS_HOST_FIELD_GS %08x\n", ASMGetGS()));
        Log2(("VMX_VMCS_HOST_FIELD_SS %08x\n", ASMGetSS()));
        Log2(("VMX_VMCS_HOST_FIELD_TR %08x\n", ASMGetTR()));

        /* GDTR & IDTR */
        ASMGetGDTR(&gdtr);
        rc  = VMXWriteVMCS(VMX_VMCS_HOST_GDTR_BASE, gdtr.pGdt);
        ASMGetIDTR(&idtr);
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_IDTR_BASE, idtr.pIdt);
        AssertRC(rc);
        Log2(("VMX_VMCS_HOST_GDTR_BASE %VHv\n", gdtr.pGdt));
        Log2(("VMX_VMCS_HOST_IDTR_BASE %VHv\n", idtr.pIdt));

        /* Save the base address of the TR selector. */
        if (SelTR > gdtr.cbGdt)
        {
            AssertMsgFailed(("Invalid TR selector %x. GDTR.cbGdt=%x\n", SelTR, gdtr.cbGdt));
            return VERR_VMX_INVALID_HOST_STATE;
        }

        pDesc = &((PX86DESCHC)gdtr.pGdt)[SelTR >> X86_SEL_SHIFT_HC];
#if HC_ARCH_BITS == 64
        trBase = X86DESC64_BASE(*pDesc);
#else
        trBase = X86DESC_BASE(*pDesc);
#endif
        rc = VMXWriteVMCS(VMX_VMCS_HOST_TR_BASE, trBase);
        AssertRC(rc);
        Log2(("VMX_VMCS_HOST_TR_BASE %VHv\n", trBase));

        /* FS and GS base. */
#if HC_ARCH_BITS == 64
        Log2(("MSR_K8_FS_BASE = %VHv\n", ASMRdMsr(MSR_K8_FS_BASE)));
        Log2(("MSR_K8_GS_BASE = %VHv\n", ASMRdMsr(MSR_K8_GS_BASE)));
        rc  = VMXWriteVMCS64(VMX_VMCS_HOST_FS_BASE, ASMRdMsr(MSR_K8_FS_BASE));
        rc |= VMXWriteVMCS64(VMX_VMCS_HOST_GS_BASE, ASMRdMsr(MSR_K8_GS_BASE));
#endif
        AssertRC(rc);

        /* Sysenter MSRs. */
        /** @todo expensive!! */
        rc  = VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
        Log2(("VMX_VMCS_HOST_SYSENTER_CS %08x\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_CS)));
#if HC_ARCH_BITS == 32
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
        rc |= VMXWriteVMCS(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
        Log2(("VMX_VMCS_HOST_SYSENTER_EIP %VHv\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP)));
        Log2(("VMX_VMCS_HOST_SYSENTER_ESP %VHv\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP)));
#else
        Log2(("VMX_VMCS_HOST_SYSENTER_EIP %VHv\n", ASMRdMsr(MSR_IA32_SYSENTER_EIP)));
        Log2(("VMX_VMCS_HOST_SYSENTER_ESP %VHv\n", ASMRdMsr(MSR_IA32_SYSENTER_ESP)));
        rc |= VMXWriteVMCS64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
        rc |= VMXWriteVMCS64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
#endif
        AssertRC(rc);

        pVM->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_HOST_CONTEXT;
    }
    return rc;
}


/**
 * Loads the guest state
 *
 * NOTE: Don't do anything here that can cause a jump back to ring 3!!!!!
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pCtx        Guest context
 */
HWACCMR0DECL(int) VMXR0LoadGuestState(PVM pVM, CPUMCTX *pCtx)
{
    int rc = VINF_SUCCESS;
    RTGCUINTPTR val;
    X86EFLAGS   eflags;

    /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_SEGMENT_REGS)
    {
        VMX_WRITE_SELREG(ES, es);
        AssertRC(rc);

        VMX_WRITE_SELREG(CS, cs);
        AssertRC(rc);

        VMX_WRITE_SELREG(SS, ss);
        AssertRC(rc);

        VMX_WRITE_SELREG(DS, ds);
        AssertRC(rc);

        /* The base values in the hidden fs & gs registers are not in sync with the msrs; they are cut to 32 bits. */
        VMX_WRITE_SELREG(FS, fs);
        AssertRC(rc);

        VMX_WRITE_SELREG(GS, gs);
        AssertRC(rc);
    }

    /* Guest CPU context: LDTR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_LDTR)
    {
        if (pCtx->ldtr == 0)
        {
            rc  = VMXWriteVMCS(VMX_VMCS_GUEST_FIELD_LDTR, 0);
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_LIMIT, 0);
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_BASE, 0);
            /** @note vmlaunch will fail with 0 or just 0x02. No idea why. */
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_ACCESS_RIGHTS, 0x82 /* present, LDT */);
        }
        else
        {
            rc  = VMXWriteVMCS(VMX_VMCS_GUEST_FIELD_LDTR, pCtx->ldtr);
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_LIMIT, pCtx->ldtrHid.u32Limit);
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtrHid.u64Base);
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_LDTR_ACCESS_RIGHTS, pCtx->ldtrHid.Attr.u);
        }
        AssertRC(rc);
    }
    /* Guest CPU context: TR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_TR)
    {
        rc = VMXWriteVMCS(VMX_VMCS_GUEST_FIELD_TR, pCtx->tr);

        /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
        if (!(pCtx->cr0 & X86_CR0_PROTECTION_ENABLE))
        {
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_TR_LIMIT, sizeof(*pVM->hwaccm.s.vmx.pRealModeTSS));
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_TR_BASE, 0);
        }
        else
        {
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_TR_LIMIT, pCtx->trHid.u32Limit);
            rc |= VMXWriteVMCS(VMX_VMCS_GUEST_TR_BASE, pCtx->trHid.u64Base);
        }
        val = pCtx->trHid.Attr.u;

        /* The TSS selector must be busy. */
        if ((val & 0xF) == X86_SEL_TYPE_SYS_286_TSS_AVAIL)
            val = (val & ~0xF) | X86_SEL_TYPE_SYS_286_TSS_BUSY;
        else
            /* Default even if no TR selector has been set (otherwise vmlaunch will fail!) */
            val = (val & ~0xF) | X86_SEL_TYPE_SYS_386_TSS_BUSY;

        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_TR_ACCESS_RIGHTS, val);
        AssertRC(rc);
    }
    /* Guest CPU context: GDTR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_GDTR)
    {
        rc  = VMXWriteVMCS(VMX_VMCS_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt);
        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt);
        AssertRC(rc);
    }
    /* Guest CPU context: IDTR. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_IDTR)
    {
        rc  = VMXWriteVMCS(VMX_VMCS_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt);
        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt);
        AssertRC(rc);
    }

    /*
     * Sysenter MSRs (unconditional)
     */
    rc  = VMXWriteVMCS(VMX_VMCS_GUEST_SYSENTER_CS, pCtx->SysEnter.cs);
    rc |= VMXWriteVMCS(VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
    rc |= VMXWriteVMCS(VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
    AssertRC(rc);

    /* Control registers */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR0)
    {
        val = pCtx->cr0;
        rc  = VMXWriteVMCS(VMX_VMCS_CTRL_CR0_READ_SHADOW, val);
        Log2(("Guest CR0-shadow %08x\n", val));
        if (CPUMIsGuestFPUStateActive(pVM) == false)
        {
            /* Always use #NM exceptions to load the FPU/XMM state on demand. */
            val |= X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
        }
        else
        {
            /** @todo check if we support the old style mess correctly. */
            if (!(val & X86_CR0_NE))
            {
                Log(("Forcing X86_CR0_NE!!!\n"));

                /* Also catch floating point exceptions as we need to report them to the guest in a different way. */
                if (!pVM->hwaccm.s.fFPUOldStyleOverride)
                {
                    rc = VMXWriteVMCS(VMX_VMCS_CTRL_EXCEPTION_BITMAP, HWACCM_VMX_TRAP_MASK | RT_BIT(X86_XCPT_MF));
                    AssertRC(rc);
                    pVM->hwaccm.s.fFPUOldStyleOverride = true;
                }
            }

            val |= X86_CR0_NE;  /* always turn on the native mechanism to report FPU errors (old style uses interrupts) */
        }
        /* Note: protected mode & paging are always enabled; we use them for emulating real and protected mode without paging too. */
        val |= X86_CR0_PE | X86_CR0_PG;
        /* Note: We must also set this as we rely on protecting various pages for which supervisor writes must be caught. */
        val |= X86_CR0_WP;

        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_CR0, val);
        Log2(("Guest CR0 %08x\n", val));
        /* CR0 flags owned by the host; if the guest attempts to change them, then
         * the VM will exit.
         */
        val =   X86_CR0_PE  /* Must monitor this bit (assumptions are made for real mode emulation) */
              | X86_CR0_WP  /* Must monitor this bit (it must always be enabled). */
              | X86_CR0_PG  /* Must monitor this bit (assumptions are made for real mode & protected mode without paging emulation) */
              | X86_CR0_TS
              | X86_CR0_ET
              | X86_CR0_NE
              | X86_CR0_MP;
        pVM->hwaccm.s.vmx.cr0_mask = val;

        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_CR0_MASK, val);
        Log2(("Guest CR0-mask %08x\n", val));
        AssertRC(rc);
    }
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR4)
    {
        /* CR4 */
        rc = VMXWriteVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW, pCtx->cr4);
        Log2(("Guest CR4-shadow %08x\n", pCtx->cr4));
        /* Set the required bits in cr4 too (currently X86_CR4_VMXE). */
        val = pCtx->cr4 | (uint32_t)pVM->hwaccm.s.vmx.msr.vmx_cr4_fixed0;
        switch(pVM->hwaccm.s.enmShadowMode)
        {
        case PGMMODE_REAL:          /* Real mode -> emulated using v86 mode */
        case PGMMODE_PROTECTED:     /* Protected mode, no paging -> emulated using identity mapping. */
        case PGMMODE_32_BIT:        /* 32-bit paging. */
            break;

        case PGMMODE_PAE:           /* PAE paging. */
        case PGMMODE_PAE_NX:        /* PAE paging with NX enabled. */
            /** @todo use normal 32 bits paging */
            val |= X86_CR4_PAE;
            break;

        case PGMMODE_AMD64:         /* 64-bit AMD paging (long mode). */
        case PGMMODE_AMD64_NX:      /* 64-bit AMD paging (long mode) with NX enabled. */
#ifdef VBOX_ENABLE_64_BITS_GUESTS
            break;
#else
            AssertFailed();
            return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
#endif
        default:                    /* shut up gcc */
            AssertFailed();
            return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
        }
        /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
        if (!(pCtx->cr0 & X86_CR0_PROTECTION_ENABLE))
            val |= X86_CR4_VME;

        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_CR4, val);
        Log2(("Guest CR4 %08x\n", val));
        /* CR4 flags owned by the host; if the guest attempts to change them, then
         * the VM will exit.
         */
        val =   X86_CR4_PAE
              | X86_CR4_PGE
              | X86_CR4_PSE
              | X86_CR4_VMXE;
        pVM->hwaccm.s.vmx.cr4_mask = val;

        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_CR4_MASK, val);
        Log2(("Guest CR4-mask %08x\n", val));
        AssertRC(rc);
    }

    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_CR3)
    {
        /* Save our shadow CR3 register. */
        val = PGMGetHyperCR3(pVM);
        Assert(val);
        rc = VMXWriteVMCS(VMX_VMCS_GUEST_CR3, val);
        AssertRC(rc);
    }

    /* Debug registers. */
    if (pVM->hwaccm.s.fContextUseFlags & HWACCM_CHANGED_GUEST_DEBUG)
    {
        /** @todo DR0-6 */
        val  = pCtx->dr7;
        val &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15));    /* must be zero */
        val |= 0x400;                                                   /* must be one */
#ifdef VBOX_STRICT
        val = 0x400;
#endif
        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_DR7, val);
        AssertRC(rc);

        /* IA32_DEBUGCTL MSR. */
        rc  = VMXWriteVMCS(VMX_VMCS_GUEST_DEBUGCTL_FULL, 0);
        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_DEBUGCTL_HIGH, 0);
        AssertRC(rc);

        /** @todo */
        rc |= VMXWriteVMCS(VMX_VMCS_GUEST_DEBUG_EXCEPTIONS, 0);
        AssertRC(rc);
    }

    /* EIP, ESP and EFLAGS */
    rc  = VMXWriteVMCS(VMX_VMCS_GUEST_RIP, pCtx->rip);
    rc |= VMXWriteVMCS(VMX_VMCS_GUEST_RSP, pCtx->rsp);
    AssertRC(rc);

    /* Bits 22-31, 15, 5 & 3 must be zero. Bit 1 must be 1. */
    eflags      = pCtx->eflags;
    eflags.u32 &= VMX_EFLAGS_RESERVED_0;
    eflags.u32 |= VMX_EFLAGS_RESERVED_1;

    /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
    if (!(pCtx->cr0 & X86_CR0_PROTECTION_ENABLE))
    {
        eflags.Bits.u1VM   = 1;
        eflags.Bits.u1VIF  = pCtx->eflags.Bits.u1IF;
        eflags.Bits.u2IOPL = 3;
    }

    rc = VMXWriteVMCS(VMX_VMCS_GUEST_RFLAGS, eflags.u32);
    AssertRC(rc);

    /** TSC offset. */
    uint64_t u64TSCOffset;

    if (TMCpuTickCanUseRealTSC(pVM, &u64TSCOffset))
    {
        /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET */
#if HC_ARCH_BITS == 64
        rc = VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_FULL, u64TSCOffset);
#else
        rc  = VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_FULL, (uint32_t)u64TSCOffset);
        rc |= VMXWriteVMCS(VMX_VMCS_CTRL_TSC_OFFSET_HIGH, (uint32_t)(u64TSCOffset >> 32ULL));
#endif
        AssertRC(rc);

        pVM->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
        rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVM->hwaccm.s.vmx.proc_ctls);
        AssertRC(rc);
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatTSCOffset);
    }
    else
    {
        pVM->hwaccm.s.vmx.proc_ctls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
        rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVM->hwaccm.s.vmx.proc_ctls);
        AssertRC(rc);
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatTSCIntercept);
    }

    /* VMX_VMCS_CTRL_ENTRY_CONTROLS
     * Set required bits to one and zero according to the MSR capabilities.
     */
    val = pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0;
    /* 64-bit guest mode? */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
        val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA64_MODE;
    /* else Must be zero when AMD64 is not available. */

    /* Mask away the bits that the CPU doesn't support */
    val &= pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1;
    rc = VMXWriteVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, val);
    AssertRC(rc);
    /* 64-bit guest mode? */
    if (pCtx->msrEFER & MSR_K6_EFER_LMA)
    {
#if !defined(VBOX_WITH_64_BITS_GUESTS) || HC_ARCH_BITS != 64
        return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
#else
        pVM->hwaccm.s.vmx.pfnStartVM = VMXR0StartVM64;
#endif
        /* Unconditionally update these as wrmsr might have changed them. */
        rc = VMXWriteVMCS(VMX_VMCS_GUEST_FS_BASE, pCtx->fsHid.u64Base);
        AssertRC(rc);
        rc = VMXWriteVMCS(VMX_VMCS_GUEST_GS_BASE, pCtx->gsHid.u64Base);
        AssertRC(rc);
    }
    else
    {
        pVM->hwaccm.s.vmx.pfnStartVM = VMXR0StartVM32;
    }
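    /* pfnStartVM points at the assembly world-switch helper matching the
       guest's bitness; it performs the actual VMLAUNCH/VMRESUME. */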

    /* Done. */
    pVM->hwaccm.s.fContextUseFlags &= ~HWACCM_CHANGED_ALL_GUEST;

    return rc;
}

/**
 * Runs guest code in a VT-x VM.
 *
 * @note NEVER EVER turn on interrupts here. Due to our illegal entry into the kernel, it might mess things up. (XP kernel traps have been frequently observed)
 *
 * @returns VBox status code.
 * @param   pVM         The VM to operate on.
 * @param   pCtx        Guest context
 */
HWACCMR0DECL(int) VMXR0RunGuestCode(PVM pVM, CPUMCTX *pCtx)
{
    int rc = VINF_SUCCESS;
    RTCCUINTREG val, valShadow;
    RTCCUINTREG exitReason, instrError, cbInstr;
    RTGCUINTPTR exitQualification;
    RTGCUINTPTR intInfo = 0; /* shut up buggy gcc 4 */
    RTGCUINTPTR errCode, instrInfo, uInterruptState;
    bool        fGuestStateSynced = false;
    unsigned    cResume = 0;
#ifdef VBOX_STRICT
    RTCPUID     idCpuCheck;
#endif

    Log2(("\nE"));

    STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatEntry, x);

#ifdef VBOX_STRICT
    rc = VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, &val);
    AssertRC(rc);
    Log2(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS = %08x\n", val));

    /* allowed zero */
    if ((val & pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.disallowed0)
        Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: zero\n"));

    /* allowed one */
    if ((val & ~pVM->hwaccm.s.vmx.msr.vmx_pin_ctls.n.allowed1) != 0)
        Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: one\n"));

    rc = VMXReadVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, &val);
    AssertRC(rc);
    Log2(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS = %08x\n", val));

    /* allowed zero */
    if ((val & pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.disallowed0)
        Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: zero\n"));

    /* allowed one */
    if ((val & ~pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1) != 0)
        Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: one\n"));

    rc = VMXReadVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, &val);
    AssertRC(rc);
    Log2(("VMX_VMCS_CTRL_ENTRY_CONTROLS = %08x\n", val));

    /* allowed zero */
    if ((val & pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_entry.n.disallowed0)
        Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: zero\n"));

    /* allowed one */
    if ((val & ~pVM->hwaccm.s.vmx.msr.vmx_entry.n.allowed1) != 0)
        Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: one\n"));

    rc = VMXReadVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, &val);
    AssertRC(rc);
    Log2(("VMX_VMCS_CTRL_EXIT_CONTROLS = %08x\n", val));

    /* allowed zero */
    if ((val & pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0) != pVM->hwaccm.s.vmx.msr.vmx_exit.n.disallowed0)
        Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: zero\n"));

    /* allowed one */
    if ((val & ~pVM->hwaccm.s.vmx.msr.vmx_exit.n.allowed1) != 0)
        Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: one\n"));
#endif

#if 0
    /*
     * Check if debug registers are armed.
     */
    uint32_t u32DR7 = ASMGetDR7();
    if (u32DR7 & X86_DR7_ENABLED_MASK)
    {
        pVM->cpum.s.fUseFlags |= CPUM_USE_DEBUG_REGS_HOST;
    }
    else
        pVM->cpum.s.fUseFlags &= ~CPUM_USE_DEBUG_REGS_HOST;
#endif

    /* We can jump to this point to resume execution after determining that a VM-exit is innocent.
     */
ResumeExecution:
    /* Safety precaution; looping for too long here can have a very bad effect on the host */
    if (++cResume > HWACCM_MAX_RESUME_LOOPS)
    {
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitMaxResume);
        rc = VINF_EM_RAW_INTERRUPT;
        goto end;
    }

    /* Check for irq inhibition due to instruction fusing (sti, mov ss). */
    if (VM_FF_ISSET(pVM, VM_FF_INHIBIT_INTERRUPTS))
    {
        Log(("VM_FF_INHIBIT_INTERRUPTS at %VGv successor %VGv\n", pCtx->rip, EMGetInhibitInterruptsPC(pVM)));
        if (pCtx->rip != EMGetInhibitInterruptsPC(pVM))
        {
            /** @note we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here.
             * Before we are able to execute this instruction in raw mode (iret to guest code) an external interrupt might
             * force a world switch again. Possibly allowing a guest interrupt to be dispatched in the process. This could
             * break the guest. Sounds very unlikely, but such timing-sensitive problems are not as rare as you might think.
             */
            VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);
            /* Irq inhibition is no longer active; clear the corresponding VMX state. */
            rc = VMXWriteVMCS(VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE, 0);
            AssertRC(rc);
        }
    }
    else
    {
        /* Irq inhibition is no longer active; clear the corresponding VMX state. */
        rc = VMXWriteVMCS(VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE, 0);
        AssertRC(rc);
    }

    /* Check for pending actions that force us to go back to ring 3. */
    if (VM_FF_ISPENDING(pVM, VM_FF_TO_R3 | VM_FF_TIMER))
    {
        VM_FF_CLEAR(pVM, VM_FF_TO_R3);
        STAM_COUNTER_INC(&pVM->hwaccm.s.StatSwitchToR3);
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        rc = VINF_EM_RAW_TO_R3;
        goto end;
    }
    /* Pending request packets might contain actions that need immediate attention, such as pending hardware interrupts. */
    if (VM_FF_ISPENDING(pVM, VM_FF_REQUEST))
    {
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        rc = VINF_EM_PENDING_REQUEST;
        goto end;
    }

    /* When external interrupts are pending, we should exit the VM when IF is set. */
    /** @note *after* VM_FF_INHIBIT_INTERRUPTS check!!! */
    rc = VMXR0CheckPendingInterrupt(pVM, pCtx);
    if (VBOX_FAILURE(rc))
    {
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        goto end;
    }

    /** @todo check timers?? */

    /* TPR caching using CR8 is only available in 64 bits mode */
    /* Note the 32 bits exception for AMD (X86_CPUID_AMD_FEATURE_ECX_CR8L), but that appears missing in Intel CPUs */
    /* Note: we can't do this in LoadGuestState as PDMApicGetTPR can jump back to ring 3 (lock)!!!!! */
    /*
     * @todo reduce overhead
     */
    if (    pCtx->msrEFER & MSR_K6_EFER_LMA
        &&  pVM->hwaccm.s.vmx.pAPIC)
    {
        /* TPR caching in CR8 */
        uint8_t u8TPR;
        bool    fPending;

        int rc = PDMApicGetTPR(pVM, &u8TPR, &fPending);
        AssertRC(rc);
        /* The TPR can be found at offset 0x80 in the APIC mmio page. */
        pVM->hwaccm.s.vmx.pAPIC[0x80] = u8TPR << 4;   /* bits 7-4 contain the task priority */

        /* Two options here:
         * - external interrupt pending, but masked by the TPR value.
         *   -> CR8 updates that lower the TPR value to below the current value should cause an exit
         * - no pending interrupts
         *   -> We don't need to be explicitly notified. There are enough world switches for detecting pending interrupts.
         */
        rc = VMXWriteVMCS(VMX_VMCS_CTRL_TPR_THRESHOLD, (fPending) ? u8TPR : 0);
        AssertRC(rc);
    }

    /*
     * NOTE: DO NOT DO ANYTHING AFTER THIS POINT THAT MIGHT JUMP BACK TO RING 3!
     *       (until the actual world switch)
     */
#ifdef VBOX_STRICT
    idCpuCheck = RTMpCpuId();
#endif
    /* Save the host state first. */
    rc = VMXR0SaveHostState(pVM);
    if (rc != VINF_SUCCESS)
    {
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        goto end;
    }
    /* Load the guest state */
    rc = VMXR0LoadGuestState(pVM, pCtx);
    if (rc != VINF_SUCCESS)
    {
        STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);
        goto end;
    }
    fGuestStateSynced = true;

    /* Non-register state Guest Context */
    /** @todo change me according to cpu state */
    rc = VMXWriteVMCS(VMX_VMCS_GUEST_ACTIVITY_STATE, VMX_CMS_GUEST_ACTIVITY_ACTIVE);
    AssertRC(rc);
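    /* The guest always enters in the 'active' state; since HLT is intercepted,
       the other activity states should never become relevant here. */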

    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatEntry, x);

    /* Manual save and restore:
     * - General purpose registers except RIP, RSP
     *
     * Trashed:
     * - CR2 (we don't care)
     * - LDTR (reset to 0)
     * - DRx (presumably not changed at all)
     * - DR7 (reset to 0x400)
     * - EFLAGS (reset to RT_BIT(1); not relevant)
     *
     */

    /* All done! Let's start VM execution. */
    STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatInGC, x);
#ifdef VBOX_STRICT
    Assert(idCpuCheck == RTMpCpuId());
#endif
    rc = pVM->hwaccm.s.vmx.pfnStartVM(pVM->hwaccm.s.vmx.fResumeVM, pCtx);

    /* In case we execute a goto ResumeExecution later on. */
    pVM->hwaccm.s.vmx.fResumeVM = true;
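    /* With fResumeVM set, the next world switch uses VMRESUME rather than
       VMLAUNCH on the already-launched VMCS. */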

    /**
     * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     * IMPORTANT: WE CAN'T DO ANY LOGGING OR OPERATIONS THAT CAN DO A LONGJMP BACK TO RING 3 *BEFORE* WE'VE SYNCED BACK (MOST OF) THE GUEST STATE
     * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     */

    STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatInGC, x);
    STAM_PROFILE_ADV_START(&pVM->hwaccm.s.StatExit, x);

    switch (rc)
    {
    case VINF_SUCCESS:
        break;

    case VERR_VMX_INVALID_VMXON_PTR:
        AssertFailed();
        goto end;

    case VERR_VMX_UNABLE_TO_START_VM:
    case VERR_VMX_UNABLE_TO_RESUME_VM:
    {
#ifdef VBOX_STRICT
        int rc1;

        rc1  = VMXReadVMCS(VMX_VMCS_RO_EXIT_REASON, &exitReason);
        rc1 |= VMXReadVMCS(VMX_VMCS_RO_VM_INSTR_ERROR, &instrError);
        AssertRC(rc1);
        if (rc1 == VINF_SUCCESS)
        {
            RTGDTR      gdtr;
            PX86DESCHC  pDesc;

            ASMGetGDTR(&gdtr);

            Log(("Unable to start/resume VM for reason: %x. Instruction error %x\n", (uint32_t)exitReason, (uint32_t)instrError));
            Log(("Current stack %08x\n", &rc1));


            VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
            Log(("Old eip %VGv new %VGv\n", pCtx->rip, (RTGCPTR)val));
            VMXReadVMCS(VMX_VMCS_CTRL_PIN_EXEC_CONTROLS, &val);
            Log(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS   %08x\n", val));
            VMXReadVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, &val);
            Log(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS  %08x\n", val));
            VMXReadVMCS(VMX_VMCS_CTRL_ENTRY_CONTROLS, &val);
            Log(("VMX_VMCS_CTRL_ENTRY_CONTROLS      %08x\n", val));
            VMXReadVMCS(VMX_VMCS_CTRL_EXIT_CONTROLS, &val);
            Log(("VMX_VMCS_CTRL_EXIT_CONTROLS       %08x\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_CR0, &val);
            Log(("VMX_VMCS_HOST_CR0 %08x\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_CR3, &val);
            Log(("VMX_VMCS_HOST_CR3 %VHp\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_CR4, &val);
            Log(("VMX_VMCS_HOST_CR4 %08x\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_FIELD_CS, &val);
            Log(("VMX_VMCS_HOST_FIELD_CS %08x\n", val));
            if (val < gdtr.cbGdt)
            {
                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
                HWACCMR0DumpDescriptor(pDesc, val, "CS: ");
            }

            VMXReadVMCS(VMX_VMCS_HOST_FIELD_DS, &val);
            Log(("VMX_VMCS_HOST_FIELD_DS %08x\n", val));
            if (val < gdtr.cbGdt)
            {
                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
                HWACCMR0DumpDescriptor(pDesc, val, "DS: ");
            }

            VMXReadVMCS(VMX_VMCS_HOST_FIELD_ES, &val);
            Log(("VMX_VMCS_HOST_FIELD_ES %08x\n", val));
            if (val < gdtr.cbGdt)
            {
                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
                HWACCMR0DumpDescriptor(pDesc, val, "ES: ");
            }

            VMXReadVMCS(VMX_VMCS_HOST_FIELD_FS, &val);
            Log(("VMX_VMCS_HOST_FIELD_FS %08x\n", val));
            if (val < gdtr.cbGdt)
            {
                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
                HWACCMR0DumpDescriptor(pDesc, val, "FS: ");
            }

            VMXReadVMCS(VMX_VMCS_HOST_FIELD_GS, &val);
            Log(("VMX_VMCS_HOST_FIELD_GS %08x\n", val));
            if (val < gdtr.cbGdt)
            {
                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
                HWACCMR0DumpDescriptor(pDesc, val, "GS: ");
            }

            VMXReadVMCS(VMX_VMCS_HOST_FIELD_SS, &val);
            Log(("VMX_VMCS_HOST_FIELD_SS %08x\n", val));
            if (val < gdtr.cbGdt)
            {
                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
                HWACCMR0DumpDescriptor(pDesc, val, "SS: ");
            }

            VMXReadVMCS(VMX_VMCS_HOST_FIELD_TR, &val);
            Log(("VMX_VMCS_HOST_FIELD_TR %08x\n", val));
            if (val < gdtr.cbGdt)
            {
                pDesc = &((PX86DESCHC)gdtr.pGdt)[val >> X86_SEL_SHIFT_HC];
                HWACCMR0DumpDescriptor(pDesc, val, "TR: ");
            }

            VMXReadVMCS(VMX_VMCS_HOST_TR_BASE, &val);
            Log(("VMX_VMCS_HOST_TR_BASE %VHv\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_GDTR_BASE, &val);
            Log(("VMX_VMCS_HOST_GDTR_BASE %VHv\n", val));
            VMXReadVMCS(VMX_VMCS_HOST_IDTR_BASE, &val);
            Log(("VMX_VMCS_HOST_IDTR_BASE %VHv\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_CS, &val);
            Log(("VMX_VMCS_HOST_SYSENTER_CS  %08x\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_EIP, &val);
            Log(("VMX_VMCS_HOST_SYSENTER_EIP %VHv\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_SYSENTER_ESP, &val);
            Log(("VMX_VMCS_HOST_SYSENTER_ESP %VHv\n", val));

            VMXReadVMCS(VMX_VMCS_HOST_RSP, &val);
            Log(("VMX_VMCS_HOST_RSP %VHv\n", val));
            VMXReadVMCS(VMX_VMCS_HOST_RIP, &val);
            Log(("VMX_VMCS_HOST_RIP %VHv\n", val));

#if HC_ARCH_BITS == 64
            Log(("MSR_K6_EFER       = %VX64\n", ASMRdMsr(MSR_K6_EFER)));
            Log(("MSR_K6_STAR       = %VX64\n", ASMRdMsr(MSR_K6_STAR)));
            Log(("MSR_K8_LSTAR      = %VX64\n", ASMRdMsr(MSR_K8_LSTAR)));
            Log(("MSR_K8_CSTAR      = %VX64\n", ASMRdMsr(MSR_K8_CSTAR)));
            Log(("MSR_K8_SF_MASK    = %VX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
#endif
        }
#endif /* VBOX_STRICT */
        goto end;
    }

    default:
        /* impossible */
        AssertFailed();
        goto end;
    }
    /* Success. Query the guest state and figure out what has happened. */

    /* Investigate why there was a VM-exit. */
    rc = VMXReadVMCS(VMX_VMCS_RO_EXIT_REASON, &exitReason);
    STAM_COUNTER_INC(&pVM->hwaccm.s.pStatExitReasonR0[exitReason & MASK_EXITREASON_STAT]);

    exitReason &= 0xffff;   /* bit 0-15 contain the exit code. */
    rc |= VMXReadVMCS(VMX_VMCS_RO_VM_INSTR_ERROR, &instrError);
    rc |= VMXReadVMCS(VMX_VMCS_RO_EXIT_INSTR_LENGTH, &cbInstr);
    rc |= VMXReadVMCS(VMX_VMCS_RO_EXIT_INTERRUPTION_INFO, &val);
    intInfo   = val;
    rc |= VMXReadVMCS(VMX_VMCS_RO_EXIT_INTERRUPTION_ERRCODE, &val);
    errCode   = val;    /* might not be valid; depends on VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID. */
    rc |= VMXReadVMCS(VMX_VMCS_RO_EXIT_INSTR_INFO, &val);
    instrInfo = val;
    rc |= VMXReadVMCS(VMX_VMCS_RO_EXIT_QUALIFICATION, &val);
    exitQualification = val;
    AssertRC(rc);

    /* Let's first sync back eip, esp, and eflags. */
    rc = VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
    AssertRC(rc);
    pCtx->rip = val;
    rc = VMXReadVMCS(VMX_VMCS_GUEST_RSP, &val);
    AssertRC(rc);
    pCtx->rsp = val;
    rc = VMXReadVMCS(VMX_VMCS_GUEST_RFLAGS, &val);
    AssertRC(rc);
    pCtx->eflags.u32 = val;

    /* Update the APIC with the cached TPR value.
     * @todo reduce overhead
     */
    if (    pCtx->msrEFER & MSR_K6_EFER_LMA
        &&  pVM->hwaccm.s.vmx.pAPIC)
    {
        rc = PDMApicSetTPR(pVM, pVM->hwaccm.s.vmx.pAPIC[0x80] >> 4);
        AssertRC(rc);
    }

    /* Take care of instruction fusing (sti, mov ss) */
    rc |= VMXReadVMCS(VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE, &val);
    uInterruptState = val;
    if (uInterruptState != 0)
    {
        Assert(uInterruptState <= 2);   /* only sti & mov ss */
        Log(("uInterruptState %x eip=%VGv\n", uInterruptState, pCtx->rip));
        EMSetInhibitInterruptsPC(pVM, pCtx->rip);
    }
    else
        VM_FF_CLEAR(pVM, VM_FF_INHIBIT_INTERRUPTS);

    /* Real mode emulation using v86 mode with CR4.VME (interrupt redirection using the int bitmap in the TSS) */
    if (!(pCtx->cr0 & X86_CR0_PROTECTION_ENABLE))
    {
        /* Hide our emulation flags */
        pCtx->eflags.Bits.u1VM   = 0;
        pCtx->eflags.Bits.u1IF   = pCtx->eflags.Bits.u1VIF;
        pCtx->eflags.Bits.u1VIF  = 0;
        pCtx->eflags.Bits.u2IOPL = 0;
    }

    /* Control registers. */
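    /* Fold the guest-visible values of the host-owned bits (kept in the read
       shadow) into the real CR0/CR4 before handing them back to CPUM. */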
1469 VMXReadVMCS(VMX_VMCS_CTRL_CR0_READ_SHADOW, &valShadow);
1470 VMXReadVMCS(VMX_VMCS_GUEST_CR0, &val);
1471 val = (valShadow & pVM->hwaccm.s.vmx.cr0_mask) | (val & ~pVM->hwaccm.s.vmx.cr0_mask);
1472 CPUMSetGuestCR0(pVM, val);
1473
1474 VMXReadVMCS(VMX_VMCS_CTRL_CR4_READ_SHADOW, &valShadow);
1475 VMXReadVMCS(VMX_VMCS_GUEST_CR4, &val);
1476 val = (valShadow & pVM->hwaccm.s.vmx.cr4_mask) | (val & ~pVM->hwaccm.s.vmx.cr4_mask);
1477 CPUMSetGuestCR4(pVM, val);
1478
1479 CPUMSetGuestCR2(pVM, ASMGetCR2());
1480
1481 VMXReadVMCS(VMX_VMCS_GUEST_DR7, &val);
1482 CPUMSetGuestDR7(pVM, val);
1483
1484 /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
1485 VMX_READ_SELREG(ES, es);
1486 VMX_READ_SELREG(SS, ss);
1487 VMX_READ_SELREG(CS, cs);
1488 VMX_READ_SELREG(DS, ds);
1489 VMX_READ_SELREG(FS, fs);
1490 VMX_READ_SELREG(GS, gs);
1491
1492 /** @note NOW IT'S SAFE FOR LOGGING! */
1493 Log2(("Raw exit reason %08x\n", exitReason));
1494
1495 /* Check if an injected event was interrupted prematurely. */
1496 rc = VMXReadVMCS(VMX_VMCS_RO_IDT_INFO, &val);
1497 AssertRC(rc);
1498 pVM->hwaccm.s.Event.intInfo = VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(val);
1499 if ( VMX_EXIT_INTERRUPTION_INFO_VALID(pVM->hwaccm.s.Event.intInfo)
1500 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVM->hwaccm.s.Event.intInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SW)
1501 {
1502 pVM->hwaccm.s.Event.fPending = true;
1503 /* Error code present? */
1504 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVM->hwaccm.s.Event.intInfo))
1505 {
1506 rc = VMXReadVMCS(VMX_VMCS_RO_IDT_ERRCODE, &val);
1507 AssertRC(rc);
1508 pVM->hwaccm.s.Event.errCode = val;
1509 Log(("Pending inject %VX64 at %VGv exit=%08x intInfo=%08x exitQualification=%08x pending error=%RX64\n", pVM->hwaccm.s.Event.intInfo, pCtx->rip, exitReason, intInfo, exitQualification, val));
1510 }
1511 else
1512 {
1513 Log(("Pending inject %VX64 at %VGv exit=%08x intInfo=%08x exitQualification=%08x\n", pVM->hwaccm.s.Event.intInfo, pCtx->rip, exitReason, intInfo, exitQualification));
1514 pVM->hwaccm.s.Event.errCode = 0;
1515 }
1516 }
1517
1518#ifdef VBOX_STRICT
1519 if (exitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE)
1520 HWACCMDumpRegs(pVM, pCtx);
1521#endif
1522
1523 Log2(("E%d", exitReason));
1524 Log2(("Exit reason %d, exitQualification %08x\n", exitReason, exitQualification));
1525 Log2(("instrInfo=%d instrError=%d instr length=%d\n", instrInfo, instrError, cbInstr));
1526 Log2(("Interruption error code %d\n", errCode));
1527 Log2(("IntInfo = %08x\n", intInfo));
1528 Log2(("New EIP=%VGv\n", pCtx->rip));
1529
1530 /* Some cases don't need a complete resync of the guest CPU state; handle them here. */
1531 switch (exitReason)
1532 {
1533 case VMX_EXIT_EXCEPTION: /* 0 Exception or non-maskable interrupt (NMI). */
1534 case VMX_EXIT_EXTERNAL_IRQ: /* 1 External interrupt. */
1535 {
1536 uint32_t vector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(intInfo);
1537
1538 if (!VMX_EXIT_INTERRUPTION_INFO_VALID(intInfo))
1539 {
1540 Assert(exitReason == VMX_EXIT_EXTERNAL_IRQ);
1541 /* External interrupt; leave to allow it to be dispatched again. */
1542 rc = VINF_EM_RAW_INTERRUPT;
1543 break;
1544 }
1545 switch (VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo))
1546 {
1547 case VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI: /* Non-maskable interrupt. */
1548 /* External interrupt; leave to allow it to be dispatched again. */
1549 rc = VINF_EM_RAW_INTERRUPT;
1550 break;
1551
1552 case VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT: /* External hardware interrupt. */
1553 AssertFailed(); /* can't come here; fails the first check. */
1554 break;
1555
1556 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SWEXCPT: /* Software exception. (#BP or #OF) */
1557 Assert(vector == 3 || vector == 4);
1558 /* no break */
1559 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HWEXCPT: /* Hardware exception. */
1560 Log2(("Hardware/software interrupt %d\n", vector));
1561 switch (vector)
1562 {
1563 case X86_XCPT_NM:
1564 {
1565 Log(("#NM fault at %VGv error code %x\n", pCtx->rip, errCode));
1566
1567 /** @todo don't intercept #NM exceptions anymore when we've activated the guest FPU state. */
1568 /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
1569 rc = CPUMR0LoadGuestFPU(pVM, pCtx);
1570 if (rc == VINF_SUCCESS)
1571 {
1572 Assert(CPUMIsGuestFPUStateActive(pVM));
1573
1574 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowNM);
1575
1576 /* Continue execution. */
1577 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1578 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
1579
1580 goto ResumeExecution;
1581 }
1582
1583 Log(("Forward #NM fault to the guest\n"));
1584 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestNM);
1585 rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, 0);
1586 AssertRC(rc);
1587 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1588 goto ResumeExecution;
1589 }
1590
1591 case X86_XCPT_PF: /* Page fault */
1592 {
1593 Log2(("Page fault at %VGv error code %x\n", exitQualification ,errCode));
1594 /* Exit qualification contains the linear address of the page fault. */
1595 TRPMAssertTrap(pVM, X86_XCPT_PF, TRPM_TRAP);
1596 TRPMSetErrorCode(pVM, errCode);
1597 TRPMSetFaultAddress(pVM, exitQualification);
1598
1599 /* Forward it to our trap handler first, in case our shadow pages are out of sync. */
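                /* Three outcomes matter below: VINF_SUCCESS means the shadow page
                 * tables were merely out of sync and we can resume; VINF_EM_RAW_GUEST_TRAP
                 * means the fault is genuine and must be reflected to the guest;
                 * anything else is left to the recompiler. */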
1600 rc = PGMTrap0eHandler(pVM, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)exitQualification);
1601 Log2(("PGMTrap0eHandler %VGv returned %Vrc\n", pCtx->rip, rc));
1602 if (rc == VINF_SUCCESS)
1603 { /* We've successfully synced our shadow pages, so let's just continue execution. */
1604 Log2(("Shadow page fault at %VGv cr2=%VGv error code %x\n", pCtx->rip, exitQualification ,errCode));
1605 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitShadowPF);
1606
1607 TRPMResetTrap(pVM);
1608
1609 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1610 goto ResumeExecution;
1611 }
1612 else
1613 if (rc == VINF_EM_RAW_GUEST_TRAP)
1614                 {   /* A genuine page fault.
1615 * Forward the trap to the guest by injecting the exception and resuming execution.
1616 */
1617 Log2(("Forward page fault to the guest\n"));
1618 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestPF);
1619 /* The error code might have been changed. */
1620 errCode = TRPMGetErrorCode(pVM);
1621
1622 TRPMResetTrap(pVM);
1623
1624 /* Now we must update CR2. */
1625 pCtx->cr2 = exitQualification;
1626 rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
1627 AssertRC(rc);
1628
1629 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1630 goto ResumeExecution;
1631 }
1632#ifdef VBOX_STRICT
1633 if (rc != VINF_EM_RAW_EMULATE_INSTR)
1634 Log2(("PGMTrap0eHandler failed with %d\n", rc));
1635#endif
1636 /* Need to go back to the recompiler to emulate the instruction. */
1637 TRPMResetTrap(pVM);
1638 break;
1639 }
1640
1641             case X86_XCPT_MF: /* x87 floating-point exception. */
1642 {
1643 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestMF);
1644 if (!(pCtx->cr0 & X86_CR0_NE))
1645 {
1646                     /* Old-style FPU error reporting needs some extra work. */
1647 /** @todo don't fall back to the recompiler, but do it manually. */
1648 rc = VINF_EM_RAW_EMULATE_INSTR;
1649 break;
1650 }
1651 Log(("Trap %x at %VGv\n", vector, pCtx->rip));
1652 rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
1653 AssertRC(rc);
1654
1655 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1656 goto ResumeExecution;
1657 }
1658
1659#ifdef VBOX_STRICT
1660             case X86_XCPT_GP:   /* General protection fault exception. */
1661             case X86_XCPT_UD:   /* Invalid opcode exception. */
1662             case X86_XCPT_DE:   /* Divide error exception. */
1663             case X86_XCPT_SS:   /* Stack segment fault exception. */
1664             case X86_XCPT_NP:   /* Segment not present exception. */
1665 {
1666 switch(vector)
1667 {
1668 case X86_XCPT_DE:
1669 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestDE);
1670 break;
1671 case X86_XCPT_UD:
1672 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestUD);
1673 break;
1674 case X86_XCPT_SS:
1675 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestSS);
1676 break;
1677 case X86_XCPT_NP:
1678 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestNP);
1679 break;
1680 case X86_XCPT_GP:
1681 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitGuestGP);
1682 break;
1683 }
1684
1685 Log(("Trap %x at %VGv\n", vector, pCtx->rip));
1686 rc = VMXR0InjectEvent(pVM, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo), cbInstr, errCode);
1687 AssertRC(rc);
1688
1689 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1690 goto ResumeExecution;
1691 }
1692#endif
1693 default:
1694             AssertMsgFailed(("Unexpected VM-exit caused by exception %x\n", vector));
1695 rc = VERR_EM_INTERNAL_ERROR;
1696 break;
1697 } /* switch (vector) */
1698
1699 break;
1700
1701 default:
1702 rc = VERR_EM_INTERNAL_ERROR;
1703 AssertFailed();
1704 break;
1705 }
1706
1707 break;
1708 }
1709
1710 case VMX_EXIT_IRQ_WINDOW: /* 7 Interrupt window. */
1711         /* The guest can now accept interrupts; clear the interrupt-window exiting control so we stop taking VM-exits on IF=1. */
1712 Log2(("VMX_EXIT_IRQ_WINDOW %VGv\n", pCtx->rip));
1713 pVM->hwaccm.s.vmx.proc_ctls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_IRQ_WINDOW_EXIT;
1714 rc = VMXWriteVMCS(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVM->hwaccm.s.vmx.proc_ctls);
1715 AssertRC(rc);
1716 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIrqWindow);
1717 goto ResumeExecution; /* we check for pending guest interrupts there */
1718
1719 case VMX_EXIT_INVD: /* 13 Guest software attempted to execute INVD. */
1720 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitInvd);
1721 /* Skip instruction and continue directly. */
1722 pCtx->rip += cbInstr;
1723         /* Continue execution. */
1724 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1725 goto ResumeExecution;
1726
1727 case VMX_EXIT_CPUID: /* 10 Guest software attempted to execute CPUID. */
1728 {
1729 Log2(("VMX: Cpuid %x\n", pCtx->eax));
1730 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCpuid);
1731 rc = EMInterpretCpuId(pVM, CPUMCTX2CORE(pCtx));
1732 if (rc == VINF_SUCCESS)
1733 {
1734 /* Update EIP and continue execution. */
1735 Assert(cbInstr == 2);
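            /* (CPUID is a fixed two-byte opcode, 0Fh A2h; cbInstr comes from the
             *  VM-exit instruction-length field of the VMCS.) */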
1736 pCtx->rip += cbInstr;
1737 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1738 goto ResumeExecution;
1739 }
1740 AssertMsgFailed(("EMU: cpuid failed with %Vrc\n", rc));
1741 rc = VINF_EM_RAW_EMULATE_INSTR;
1742 break;
1743 }
1744
1745 case VMX_EXIT_RDTSC: /* 16 Guest software attempted to execute RDTSC. */
1746 {
1747 Log2(("VMX: Rdtsc\n"));
1748 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitRdtsc);
1749 rc = EMInterpretRdtsc(pVM, CPUMCTX2CORE(pCtx));
1750 if (rc == VINF_SUCCESS)
1751 {
1752 /* Update EIP and continue execution. */
1753 Assert(cbInstr == 2);
1754 pCtx->rip += cbInstr;
1755 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1756 goto ResumeExecution;
1757 }
1758 AssertMsgFailed(("EMU: rdtsc failed with %Vrc\n", rc));
1759 rc = VINF_EM_RAW_EMULATE_INSTR;
1760 break;
1761 }
1762
1763     case VMX_EXIT_INVPG:                /* 14 Guest software attempted to execute INVLPG. */
1764 {
1765 Log2(("VMX: invlpg\n"));
1766 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitInvpg);
1767 rc = EMInterpretInvlpg(pVM, CPUMCTX2CORE(pCtx), exitQualification);
1768 if (rc == VINF_SUCCESS)
1769 {
1770 /* Update EIP and continue execution. */
1771 pCtx->rip += cbInstr;
1772 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1773 goto ResumeExecution;
1774 }
1775 AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: invlpg %VGv failed with %Vrc\n", exitQualification, rc));
1776 break;
1777 }
1778
1779 case VMX_EXIT_RDMSR: /* 31 RDMSR. Guest software attempted to execute RDMSR. */
1780 case VMX_EXIT_WRMSR: /* 32 WRMSR. Guest software attempted to execute WRMSR. */
1781 {
1782 uint32_t cbSize;
1783
1784         /* Note: the Intel manual claims there's a REX version of RDMSR that's slightly different, so we play it safe by completely disassembling the instruction. */
1785 Log2(("VMX: %s\n", (exitReason == VMX_EXIT_RDMSR) ? "rdmsr" : "wrmsr"));
1786 rc = EMInterpretInstruction(pVM, CPUMCTX2CORE(pCtx), 0, &cbSize);
1787 if (rc == VINF_SUCCESS)
1788 {
1789 /* EIP has been updated already. */
1790
1791 /* Only resume if successful. */
1792 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1793 goto ResumeExecution;
1794 }
1795 AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: %s failed with %Vrc\n", (exitReason == VMX_EXIT_RDMSR) ? "rdmsr" : "wrmsr", rc));
1796 break;
1797 }
1798
1799 case VMX_EXIT_CRX_MOVE: /* 28 Control-register accesses. */
1800 {
1801 switch (VMX_EXIT_QUALIFICATION_CRX_ACCESS(exitQualification))
1802 {
1803 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE:
1804 Log2(("VMX: %VGv mov cr%d, x\n", pCtx->rip, VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)));
1805 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxWrite);
1806 rc = EMInterpretCRxWrite(pVM, CPUMCTX2CORE(pCtx),
1807 VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification),
1808 VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification));
1809
1810 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification))
1811 {
1812 case 0:
1813 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
1814 break;
1815 case 2:
1816 break;
1817 case 3:
1818 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR3;
1819 break;
1820 case 4:
1821 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR4;
1822 break;
1823 case 8:
1824                 /* CR8 contains the APIC TPR. */
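                /* (When the "use TPR shadow" processor-based control is active, the
                 *  CPU virtualizes most CR8 accesses without exiting, so reaching this
                 *  case implies the feature is unavailable; hence the assertion below.) */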
1825 Assert(!(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
1826 break;
1827
1828 default:
1829 AssertFailed();
1830 break;
1831 }
1832 /* Check if a sync operation is pending. */
1833 if ( rc == VINF_SUCCESS /* don't bother if we are going to ring 3 anyway */
1834 && VM_FF_ISPENDING(pVM, VM_FF_PGM_SYNC_CR3 | VM_FF_PGM_SYNC_CR3_NON_GLOBAL))
1835 {
1836 rc = PGMSyncCR3(pVM, CPUMGetGuestCR0(pVM), CPUMGetGuestCR3(pVM), CPUMGetGuestCR4(pVM), VM_FF_ISSET(pVM, VM_FF_PGM_SYNC_CR3));
1837 AssertRC(rc);
1838 }
1839 break;
1840
1841 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ:
1842 Log2(("VMX: mov x, crx\n"));
1843 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCRxRead);
1844
1845 /* CR8 reads only cause an exit when the TPR shadow feature isn't present. */
1846 Assert(VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != 8 || !(pVM->hwaccm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
1847
1848 rc = EMInterpretCRxRead(pVM, CPUMCTX2CORE(pCtx),
1849 VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification),
1850 VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification));
1851 break;
1852
1853 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS:
1854 Log2(("VMX: clts\n"));
1855 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitCLTS);
1856 rc = EMInterpretCLTS(pVM);
1857 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
1858 break;
1859
1860 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW:
1861 Log2(("VMX: lmsw %x\n", VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification)));
1862 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitLMSW);
1863 rc = EMInterpretLMSW(pVM, VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification));
1864 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_GUEST_CR0;
1865 break;
1866 }
1867
1868 /* Update EIP if no error occurred. */
1869 if (VBOX_SUCCESS(rc))
1870 pCtx->rip += cbInstr;
1871
1872 if (rc == VINF_SUCCESS)
1873 {
1874 /* Only resume if successful. */
1875 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1876 goto ResumeExecution;
1877 }
1878 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
1879 break;
1880 }
1881
1882 case VMX_EXIT_DRX_MOVE: /* 29 Debug-register accesses. */
1883 {
1884 /** @todo clear VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT after the first time and restore drx registers afterwards */
1885 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
1886 {
1887 Log2(("VMX: mov drx%d, genreg%d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification), VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification)));
1888 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitDRxWrite);
1889 rc = EMInterpretDRxWrite(pVM, CPUMCTX2CORE(pCtx),
1890 VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification),
1891 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification));
1892 Log2(("DR7=%08x\n", pCtx->dr7));
1893 }
1894 else
1895 {
1896 Log2(("VMX: mov x, drx\n"));
1897 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitDRxRead);
1898 rc = EMInterpretDRxRead(pVM, CPUMCTX2CORE(pCtx),
1899 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification),
1900 VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification));
1901 }
1902 /* Update EIP if no error occurred. */
1903 if (VBOX_SUCCESS(rc))
1904 pCtx->rip += cbInstr;
1905
1906 if (rc == VINF_SUCCESS)
1907 {
1908 /* Only resume if successful. */
1909 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1910 goto ResumeExecution;
1911 }
1912 Assert(rc == VERR_EM_INTERPRETER);
1913 break;
1914 }
1915
1916 /** @note We'll get a #GP if the IO instruction isn't allowed (IOPL or TSS bitmap); no need to double check. */
1917 case VMX_EXIT_PORT_IO: /* 30 I/O instruction. */
1918 {
1919 uint32_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(exitQualification);
1920 uint32_t uPort;
1921 bool fIOWrite = (VMX_EXIT_QUALIFICATION_IO_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
1922
1923 /** @todo necessary to make the distinction? */
1924 if (VMX_EXIT_QUALIFICATION_IO_ENCODING(exitQualification) == VMX_EXIT_QUALIFICATION_IO_ENCODING_DX)
1925 {
1926 uPort = pCtx->edx & 0xffff;
1927 }
1928 else
1929 uPort = VMX_EXIT_QUALIFICATION_IO_PORT(exitQualification); /* Immediate encoding. */
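        /* (Example: "in al, dx" reports the DX encoding, whereas "out 80h, al"
         *  carries the port number directly in the exit qualification.) */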
1930
1931         /* Paranoia: size encoding 2 is undefined and anything above 3 is invalid. */
1932 if (RT_UNLIKELY(uIOWidth == 2 || uIOWidth >= 4))
1933 {
1934 rc = fIOWrite ? VINF_IOM_HC_IOPORT_WRITE : VINF_IOM_HC_IOPORT_READ;
1935 break;
1936 }
1937
1938 uint32_t cbSize = aIOSize[uIOWidth];
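        /* uIOWidth is the size-of-access encoding from the exit qualification
         * (0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes; 2 is undefined and rejected
         * above). aIOSize maps it to a byte count and aIOOpAnd to an operand
         * mask; a 1-byte access thus yields cbSize = 1 and uAndVal = 0xff, so
         * only AL is examined or updated. */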
1939
1940 if (VMX_EXIT_QUALIFICATION_IO_STRING(exitQualification))
1941 {
1942 /* ins/outs */
1943 uint32_t prefix = 0;
1944 if (VMX_EXIT_QUALIFICATION_IO_REP(exitQualification))
1945 prefix |= PREFIX_REP;
1946
1947 if (fIOWrite)
1948 {
1949 Log2(("IOMInterpretOUTSEx %VGv %x size=%d\n", pCtx->rip, uPort, cbSize));
1950 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringWrite);
1951 rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), uPort, prefix, cbSize);
1952 }
1953 else
1954 {
1955 Log2(("IOMInterpretINSEx %VGv %x size=%d\n", pCtx->rip, uPort, cbSize));
1956 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOStringRead);
1957 rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), uPort, prefix, cbSize);
1958 }
1959 }
1960 else
1961 {
1962 /* normal in/out */
1963 uint32_t uAndVal = aIOOpAnd[uIOWidth];
1964
1965 Assert(!VMX_EXIT_QUALIFICATION_IO_REP(exitQualification));
1966
1967 if (fIOWrite)
1968 {
1969 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIOWrite);
1970 rc = IOMIOPortWrite(pVM, uPort, pCtx->eax & uAndVal, cbSize);
1971 }
1972 else
1973 {
1974 uint32_t u32Val = 0;
1975
1976 STAM_COUNTER_INC(&pVM->hwaccm.s.StatExitIORead);
1977 rc = IOMIOPortRead(pVM, uPort, &u32Val, cbSize);
1978 if (IOM_SUCCESS(rc))
1979 {
1980 /* Write back to the EAX register. */
1981 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
1982 }
1983 }
1984 }
1985 /*
1986          * Handle the I/O return codes.
1987 * (The unhandled cases end up with rc == VINF_EM_RAW_EMULATE_INSTR.)
1988 */
1989 if (IOM_SUCCESS(rc))
1990 {
1991 /* Update EIP and continue execution. */
1992 pCtx->rip += cbInstr;
1993 if (RT_LIKELY(rc == VINF_SUCCESS))
1994 {
1995 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
1996 goto ResumeExecution;
1997 }
1998 break;
1999 }
2000
2001#ifdef VBOX_STRICT
2002 if (rc == VINF_IOM_HC_IOPORT_READ)
2003 Assert(!fIOWrite);
2004 else if (rc == VINF_IOM_HC_IOPORT_WRITE)
2005 Assert(fIOWrite);
2006 else
2007 AssertMsg(VBOX_FAILURE(rc) || rc == VINF_EM_RAW_EMULATE_INSTR || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Vrc\n", rc));
2008#endif
2009 break;
2010 }
2011
2012 case VMX_EXIT_TPR: /* 43 TPR below threshold. Guest software executed MOV to CR8. */
2013 LogFlow(("VMX_EXIT_TPR\n"));
2014 /* RIP is already set to the next instruction and the TPR has been synced back. Just resume. */
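        /* (This exit is only taken when a CR8 write drops the TPR below the
         *  threshold programmed into the VMCS, i.e. when a previously masked
         *  pending interrupt may now require attention.) */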
2015 goto ResumeExecution;
2016
2017 default:
2018 /* The rest is handled after syncing the entire CPU state. */
2019 break;
2020 }
2021
2022 /* Note: the guest state isn't entirely synced back at this stage. */
2023
2024 /* Investigate why there was a VM-exit. (part 2) */
2025 switch (exitReason)
2026 {
2027 case VMX_EXIT_EXCEPTION: /* 0 Exception or non-maskable interrupt (NMI). */
2028 case VMX_EXIT_EXTERNAL_IRQ: /* 1 External interrupt. */
2029 /* Already handled above. */
2030 break;
2031
2032 case VMX_EXIT_TRIPLE_FAULT: /* 2 Triple fault. */
2033 rc = VINF_EM_RESET; /* Triple fault equals a reset. */
2034 break;
2035
2036 case VMX_EXIT_INIT_SIGNAL: /* 3 INIT signal. */
2037 case VMX_EXIT_SIPI: /* 4 Start-up IPI (SIPI). */
2038 rc = VINF_EM_RAW_INTERRUPT;
2039 AssertFailed(); /* Can't happen. Yet. */
2040 break;
2041
2042 case VMX_EXIT_IO_SMI_IRQ: /* 5 I/O system-management interrupt (SMI). */
2043 case VMX_EXIT_SMI_IRQ: /* 6 Other SMI. */
2044 rc = VINF_EM_RAW_INTERRUPT;
2045 AssertFailed(); /* Can't happen afaik. */
2046 break;
2047
2048 case VMX_EXIT_TASK_SWITCH: /* 9 Task switch. */
2049 rc = VERR_EM_INTERPRETER;
2050 break;
2051
2052 case VMX_EXIT_HLT: /* 12 Guest software attempted to execute HLT. */
2053         /* Check if external interrupts are pending; if so, resume guest execution instead of halting. */
2054 pCtx->rip++; /* skip hlt */
2055 if ( pCtx->eflags.Bits.u1IF
2056 && VM_FF_ISPENDING(pVM, (VM_FF_INTERRUPT_APIC|VM_FF_INTERRUPT_PIC)))
2057 goto ResumeExecution;
2058
2059 rc = VINF_EM_HALT;
2060 break;
2061
2062 case VMX_EXIT_RSM: /* 17 Guest software attempted to execute RSM in SMM. */
2063 AssertFailed(); /* can't happen. */
2064 rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
2065 break;
2066
2067 case VMX_EXIT_VMCALL: /* 18 Guest software executed VMCALL. */
2068 case VMX_EXIT_VMCLEAR: /* 19 Guest software executed VMCLEAR. */
2069 case VMX_EXIT_VMLAUNCH: /* 20 Guest software executed VMLAUNCH. */
2070 case VMX_EXIT_VMPTRLD: /* 21 Guest software executed VMPTRLD. */
2071 case VMX_EXIT_VMPTRST: /* 22 Guest software executed VMPTRST. */
2072 case VMX_EXIT_VMREAD: /* 23 Guest software executed VMREAD. */
2073 case VMX_EXIT_VMRESUME: /* 24 Guest software executed VMRESUME. */
2074 case VMX_EXIT_VMWRITE: /* 25 Guest software executed VMWRITE. */
2075 case VMX_EXIT_VMXOFF: /* 26 Guest software executed VMXOFF. */
2076 case VMX_EXIT_VMXON: /* 27 Guest software executed VMXON. */
2077 /** @todo inject #UD immediately */
2078 rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
2079 break;
2080
2081 case VMX_EXIT_CPUID: /* 10 Guest software attempted to execute CPUID. */
2082 case VMX_EXIT_RDTSC: /* 16 Guest software attempted to execute RDTSC. */
2083     case VMX_EXIT_INVPG:                /* 14 Guest software attempted to execute INVLPG. */
2084 case VMX_EXIT_CRX_MOVE: /* 28 Control-register accesses. */
2085 case VMX_EXIT_DRX_MOVE: /* 29 Debug-register accesses. */
2086 case VMX_EXIT_PORT_IO: /* 30 I/O instruction. */
2087 /* already handled above */
2088 AssertMsg( rc == VINF_PGM_CHANGE_MODE
2089 || rc == VINF_EM_RAW_INTERRUPT
2090 || rc == VERR_EM_INTERPRETER
2091 || rc == VINF_EM_RAW_EMULATE_INSTR
2092 || rc == VINF_PGM_SYNC_CR3
2093 || rc == VINF_IOM_HC_IOPORT_READ
2094 || rc == VINF_IOM_HC_IOPORT_WRITE
2095 || rc == VINF_EM_RAW_GUEST_TRAP
2096 || rc == VINF_TRPM_XCPT_DISPATCHED
2097 || rc == VINF_EM_RESCHEDULE_REM,
2098 ("rc = %d\n", rc));
2099 break;
2100
2101 case VMX_EXIT_TPR: /* 43 TPR below threshold. Guest software executed MOV to CR8. */
2102 case VMX_EXIT_RDMSR: /* 31 RDMSR. Guest software attempted to execute RDMSR. */
2103 case VMX_EXIT_WRMSR: /* 32 WRMSR. Guest software attempted to execute WRMSR. */
2104 /* Note: If we decide to emulate them here, then we must sync the MSRs that could have been changed (sysenter, fs/gs base)!!! */
2105 rc = VERR_EM_INTERPRETER;
2106 break;
2107
2108 case VMX_EXIT_RDPMC: /* 15 Guest software attempted to execute RDPMC. */
2109 case VMX_EXIT_MWAIT: /* 36 Guest software executed MWAIT. */
2110 case VMX_EXIT_MONITOR: /* 39 Guest software attempted to execute MONITOR. */
2111 case VMX_EXIT_PAUSE: /* 40 Guest software attempted to execute PAUSE. */
2112 rc = VINF_EM_RAW_EXCEPTION_PRIVILEGED;
2113 break;
2114
2115 case VMX_EXIT_IRQ_WINDOW: /* 7 Interrupt window. */
2116 Assert(rc == VINF_EM_RAW_INTERRUPT);
2117 break;
2118
2119 case VMX_EXIT_ERR_INVALID_GUEST_STATE: /* 33 VM-entry failure due to invalid guest state. */
2120 {
2121#ifdef VBOX_STRICT
2122 Log(("VMX_EXIT_ERR_INVALID_GUEST_STATE\n"));
2123
2124 VMXReadVMCS(VMX_VMCS_GUEST_RIP, &val);
2125 Log(("Old eip %VGv new %VGv\n", pCtx->rip, (RTGCPTR)val));
2126
2127 VMXReadVMCS(VMX_VMCS_GUEST_CR0, &val);
2128 Log(("VMX_VMCS_GUEST_CR0 %RX64\n", val));
2129
2130 VMXReadVMCS(VMX_VMCS_GUEST_CR3, &val);
2131 Log(("VMX_VMCS_HOST_CR3 %VGp\n", val));
2132
2133 VMXReadVMCS(VMX_VMCS_GUEST_CR4, &val);
2134 Log(("VMX_VMCS_GUEST_CR4 %RX64\n", val));
2135
2136 VMX_LOG_SELREG(CS, "CS");
2137 VMX_LOG_SELREG(DS, "DS");
2138 VMX_LOG_SELREG(ES, "ES");
2139 VMX_LOG_SELREG(FS, "FS");
2140 VMX_LOG_SELREG(GS, "GS");
2141 VMX_LOG_SELREG(SS, "SS");
2142 VMX_LOG_SELREG(TR, "TR");
2143 VMX_LOG_SELREG(LDTR, "LDTR");
2144
2145 VMXReadVMCS(VMX_VMCS_GUEST_GDTR_BASE, &val);
2146 Log(("VMX_VMCS_GUEST_GDTR_BASE %VGv\n", val));
2147 VMXReadVMCS(VMX_VMCS_GUEST_IDTR_BASE, &val);
2148 Log(("VMX_VMCS_GUEST_IDTR_BASE %VGv\n", val));
2149#endif /* VBOX_STRICT */
2150 rc = VERR_EM_INTERNAL_ERROR;
2151 break;
2152 }
2153
2154 case VMX_EXIT_ERR_MSR_LOAD: /* 34 VM-entry failure due to MSR loading. */
2155 case VMX_EXIT_ERR_MACHINE_CHECK: /* 41 VM-entry failure due to machine-check. */
2156 default:
2157 rc = VERR_EM_INTERNAL_ERROR;
2158 AssertMsgFailed(("Unexpected exit code %d\n", exitReason)); /* Can't happen. */
2159 break;
2160
2161 }
2162end:
2163 if (fGuestStateSynced)
2164 {
2165 /* Remaining guest CPU context: TR, IDTR, GDTR, LDTR. */
2166 VMX_READ_SELREG(LDTR, ldtr);
2167 VMX_READ_SELREG(TR, tr);
2168
2169 VMXReadVMCS(VMX_VMCS_GUEST_GDTR_LIMIT, &val);
2170 pCtx->gdtr.cbGdt = val;
2171 VMXReadVMCS(VMX_VMCS_GUEST_GDTR_BASE, &val);
2172 pCtx->gdtr.pGdt = val;
2173
2174 VMXReadVMCS(VMX_VMCS_GUEST_IDTR_LIMIT, &val);
2175 pCtx->idtr.cbIdt = val;
2176 VMXReadVMCS(VMX_VMCS_GUEST_IDTR_BASE, &val);
2177 pCtx->idtr.pIdt = val;
2178
2179 /*
2180 * System MSRs
2181 */
2182 VMXReadVMCS(VMX_VMCS_GUEST_SYSENTER_CS, &val);
2183 pCtx->SysEnter.cs = val;
2184 VMXReadVMCS(VMX_VMCS_GUEST_SYSENTER_EIP, &val);
2185 pCtx->SysEnter.eip = val;
2186 VMXReadVMCS(VMX_VMCS_GUEST_SYSENTER_ESP, &val);
2187 pCtx->SysEnter.esp = val;
2188 }
2189
2190 /* Signal changes for the recompiler. */
2191 CPUMSetChangedFlags(pVM, CPUM_CHANGED_SYSENTER_MSR | CPUM_CHANGED_LDTR | CPUM_CHANGED_GDTR | CPUM_CHANGED_IDTR | CPUM_CHANGED_TR | CPUM_CHANGED_HIDDEN_SEL_REGS);
2192
2193     /* If the exit was caused by an external (host) interrupt and no guest event was being delivered, we changed nothing in the guest state, so only the host context needs to be synced on the next entry. */
2194 if ( exitReason == VMX_EXIT_EXTERNAL_IRQ
2195 && !VMX_EXIT_INTERRUPTION_INFO_VALID(intInfo))
2196 {
2197 STAM_COUNTER_INC(&pVM->hwaccm.s.StatPendingHostIrq);
2198 /* On the next entry we'll only sync the host context. */
2199 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_HOST_CONTEXT;
2200 }
2201 else
2202 {
2203 /* On the next entry we'll sync everything. */
2204 /** @todo we can do better than this */
2205 pVM->hwaccm.s.fContextUseFlags |= HWACCM_CHANGED_ALL;
2206 }
2207
2208     /* Translate into a less severe return code. */
2209 if (rc == VERR_EM_INTERPRETER)
2210 rc = VINF_EM_RAW_EMULATE_INSTR;
2211
2212 STAM_PROFILE_ADV_STOP(&pVM->hwaccm.s.StatExit, x);
2213 Log2(("X"));
2214 return rc;
2215}
2216
2217
2218/**
2219 * Enters the VT-x session
2220 *
2221 * @returns VBox status code.
2222 * @param pVM The VM to operate on.
2223 * @param pCpu CPU info struct
2224 */
2225HWACCMR0DECL(int) VMXR0Enter(PVM pVM, PHWACCM_CPUINFO pCpu)
2226{
2227 Assert(pVM->hwaccm.s.vmx.fSupported);
2228
2229 unsigned cr4 = ASMGetCR4();
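    /* VMX instructions raise #UD unless CR4.VMXE is set; VMXR0EnableCpu is
     * expected to have set it on this CPU before we get here. */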
2230 if (!(cr4 & X86_CR4_VMXE))
2231 {
2232 AssertMsgFailed(("X86_CR4_VMXE should be set!\n"));
2233 return VERR_VMX_X86_CR4_VMXE_CLEARED;
2234 }
2235
2236 /* Activate the VM Control Structure. */
2237 int rc = VMXActivateVMCS(pVM->hwaccm.s.vmx.pVMCSPhys);
2238 if (VBOX_FAILURE(rc))
2239 return rc;
2240
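    /* A VMCS that has just been made current with VMPTRLD is in the "clear"
     * launch state, so the first entry afterwards must use VMLAUNCH rather
     * than VMRESUME; clearing fResumeVM requests exactly that. */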
2241 pVM->hwaccm.s.vmx.fResumeVM = false;
2242 return VINF_SUCCESS;
2243}
2244
2245
2246/**
2247 * Leaves the VT-x session
2248 *
2249 * @returns VBox status code.
2250 * @param pVM The VM to operate on.
2251 */
2252HWACCMR0DECL(int) VMXR0Leave(PVM pVM)
2253{
2254 Assert(pVM->hwaccm.s.vmx.fSupported);
2255
2256     /* Clear the VM control structure: mark it inactive, clear implementation-specific data and write the VMCS data back to memory. */
2257 int rc = VMXClearVMCS(pVM->hwaccm.s.vmx.pVMCSPhys);
2258 AssertRC(rc);
2259
2260 return VINF_SUCCESS;
2261}
2262