VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HWVMXR0.cpp@44259

Last change on this file was in revision 44259, checked in by vboxsync on 2013-01-09:

VMM: STAM counter naming.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Id Revision
File size: 221.7 KB
1/* $Id: HWVMXR0.cpp 44259 2013-01-09 11:02:53Z vboxsync $ */
2/** @file
3 * HM VMX (VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2006-2012 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*******************************************************************************
20* Header Files *
21*******************************************************************************/
22#define LOG_GROUP LOG_GROUP_HM
23#include <iprt/asm-amd64-x86.h>
24#include <VBox/vmm/hm.h>
25#include <VBox/vmm/pgm.h>
26#include <VBox/vmm/dbgf.h>
27#include <VBox/vmm/dbgftrace.h>
28#include <VBox/vmm/selm.h>
29#include <VBox/vmm/iom.h>
30#ifdef VBOX_WITH_REM
31# include <VBox/vmm/rem.h>
32#endif
33#include <VBox/vmm/tm.h>
34#include "HMInternal.h"
35#include <VBox/vmm/vm.h>
36#include <VBox/vmm/pdmapi.h>
37#include <VBox/err.h>
38#include <VBox/log.h>
39#include <iprt/assert.h>
40#include <iprt/param.h>
41#include <iprt/string.h>
42#include <iprt/time.h>
43#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
44# include <iprt/thread.h>
45#endif
46#include <iprt/x86.h>
47#include "HWVMXR0.h"
48
49#include "dtrace/VBoxVMM.h"
50
51
52/*******************************************************************************
53* Defined Constants And Macros *
54*******************************************************************************/
55#if defined(RT_ARCH_AMD64)
56# define VMX_IS_64BIT_HOST_MODE() (true)
57#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
58# define VMX_IS_64BIT_HOST_MODE() (g_fVMXIs64bitHost != 0)
59#else
60# define VMX_IS_64BIT_HOST_MODE() (false)
61#endif
62
63
64/*******************************************************************************
65* Global Variables *
66*******************************************************************************/
67/* IO operation lookup arrays. */
68static uint32_t const g_aIOSize[4] = {1, 2, 0, 4};
69static uint32_t const g_aIOOpAnd[4] = {0xff, 0xffff, 0, 0xffffffff};
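/* Editor's note (not part of the original sources): the index into these two tables is the
 * I/O size encoding from the VM-exit qualification (0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes;
 * encoding 2 is undefined, hence the zero entries). g_aIOSize gives the access width in
 * bytes and g_aIOOpAnd the mask used to truncate AL/AX/EAX when emulating IN/OUT. */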
70
71#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
72/** See HMR0A.asm. */
73extern "C" uint32_t g_fVMXIs64bitHost;
74#endif
75
76
77/*******************************************************************************
78* Local Functions *
79*******************************************************************************/
80static DECLCALLBACK(void) hmR0VmxSetupTLBEPT(PVM pVM, PVMCPU pVCpu);
81static DECLCALLBACK(void) hmR0VmxSetupTLBVPID(PVM pVM, PVMCPU pVCpu);
82static DECLCALLBACK(void) hmR0VmxSetupTLBBoth(PVM pVM, PVMCPU pVCpu);
83static DECLCALLBACK(void) hmR0VmxSetupTLBDummy(PVM pVM, PVMCPU pVCpu);
84static void hmR0VmxFlushEPT(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush);
85static void hmR0VmxFlushVPID(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
86static void hmR0VmxUpdateExceptionBitmap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
87static void hmR0VmxSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite);
88static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc, PCPUMCTX pCtx);
89
90
91/**
92 * Updates error from VMCS to HMCPU's lasterror record.
93 *
94 * @param pVM Pointer to the VM.
95 * @param pVCpu Pointer to the VMCPU.
96 * @param rc The error code.
97 */
98static void hmR0VmxCheckError(PVM pVM, PVMCPU pVCpu, int rc)
99{
100 if (rc == VERR_VMX_GENERIC)
101 {
102 RTCCUINTREG instrError;
103
104 VMXReadVmcs(VMX_VMCS32_RO_VM_INSTR_ERROR, &instrError);
105 pVCpu->hm.s.vmx.lasterror.u32InstrError = instrError;
106 }
107 pVM->hm.s.lLastError = rc;
108}
109
110
111/**
112 * Sets up and activates VT-x on the current CPU.
113 *
114 * @returns VBox status code.
115 * @param pCpu Pointer to the CPU info struct.
116 * @param pVM Pointer to the VM. (can be NULL after a resume!!)
117 * @param pvCpuPage Pointer to the global CPU page.
118 * @param HCPhysCpuPage Physical address of the global CPU page.
119 * @param fEnabledByHost Set if SUPR0EnableVTx or similar was used to enable
120 * VT-x/AMD-V on the host.
121 */
122VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBLCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
123{
124 if (!fEnabledByHost)
125 {
126 AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
127 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
128
129 if (pVM)
130 {
131 /* Set revision dword at the beginning of the VMXON structure. */
132 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
133 }
134
135 /** @todo we should unmap the two pages from the virtual address space in order to prevent accidental corruption.
136 * (which can have very bad consequences!!!)
137 */
138
139 /** @todo r=bird: Why is this code different than the probing code earlier
140 * on? It just sets VMXE if needed and doesn't check that it isn't
141 * set. Mac OS X host_vmxoff may leave this set and we'll fail here
142 * and debug-assert in the calling code. This is what caused the
143 * "regression" after backing out the SUPR0EnableVTx code hours before
144 * 4.2.0GA (reboot fixed the issue). I've changed here to do the same
145 * as the init code. */
146 uint64_t uCr4 = ASMGetCR4();
147 if (!(uCr4 & X86_CR4_VMXE))
148 ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE); /* Make sure the VMX instructions don't cause #UD faults. */
149
150 /*
151 * Enter VMX root mode.
152 */
153 int rc = VMXEnable(HCPhysCpuPage);
154 if (RT_FAILURE(rc))
155 {
156 ASMSetCR4(uCr4);
157 return VERR_VMX_VMXON_FAILED;
158 }
159 }
160
161 /*
162 * Flush all VPIDs (in case we or any other hypervisor have been using VPIDs) so that
163 * we can avoid an explicit flush while using new VPIDs. We would still need to flush
164 * each time while reusing a VPID after hitting the MaxASID limit once.
165 */
166 if ( pVM
167 && pVM->hm.s.vmx.fVpid
168 && (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS))
169 {
170 hmR0VmxFlushVPID(pVM, NULL /* pvCpu */, VMX_FLUSH_VPID_ALL_CONTEXTS, 0 /* GCPtr */);
171 pCpu->fFlushAsidBeforeUse = false;
172 }
173 else
174 pCpu->fFlushAsidBeforeUse = true;
175
176 /*
177 * Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}.
178 */
179 ++pCpu->cTlbFlushes;
180
181 return VINF_SUCCESS;
182}
183
184
185/**
186 * Deactivates VT-x on the current CPU.
187 *
188 * @returns VBox status code.
189 * @param pCpu Pointer to the CPU info struct.
190 * @param pvCpuPage Pointer to the global CPU page.
191 * @param HCPhysCpuPage Physical address of the global CPU page.
192 */
193VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBLCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
194{
195 AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
196 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
197 NOREF(pCpu);
198
199 /* If we're somehow not in VMX root mode, then we shouldn't dare leaving it. */
200 if (!(ASMGetCR4() & X86_CR4_VMXE))
201 return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
202
203 /* Leave VMX Root Mode. */
204 VMXDisable();
205
206 /* And clear the X86_CR4_VMXE bit. */
207 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
208 return VINF_SUCCESS;
209}
210
211
212/**
213 * Does Ring-0 per VM VT-x initialization.
214 *
215 * @returns VBox status code.
216 * @param pVM Pointer to the VM.
217 */
218VMMR0DECL(int) VMXR0InitVM(PVM pVM)
219{
220 int rc;
221
222#ifdef LOG_ENABLED
223 SUPR0Printf("VMXR0InitVM %p\n", pVM);
224#endif
225
226 pVM->hm.s.vmx.hMemObjApicAccess = NIL_RTR0MEMOBJ;
227
228 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
229 {
230 /* Allocate one page for the APIC physical page (serves for filtering accesses). */
231 rc = RTR0MemObjAllocCont(&pVM->hm.s.vmx.hMemObjApicAccess, PAGE_SIZE, false /* fExecutable */);
232 AssertRC(rc);
233 if (RT_FAILURE(rc))
234 return rc;
235
236 pVM->hm.s.vmx.pbApicAccess = (uint8_t *)RTR0MemObjAddress(pVM->hm.s.vmx.hMemObjApicAccess);
237 pVM->hm.s.vmx.HCPhysApicAccess = RTR0MemObjGetPagePhysAddr(pVM->hm.s.vmx.hMemObjApicAccess, 0);
238 ASMMemZero32(pVM->hm.s.vmx.pbApicAccess, PAGE_SIZE);
239 }
240 else
241 {
242 pVM->hm.s.vmx.hMemObjApicAccess = 0;
243 pVM->hm.s.vmx.pbApicAccess = 0;
244 pVM->hm.s.vmx.HCPhysApicAccess = 0;
245 }
246
247#ifdef VBOX_WITH_CRASHDUMP_MAGIC
248 {
249 rc = RTR0MemObjAllocCont(&pVM->hm.s.vmx.hMemObjScratch, PAGE_SIZE, false /* fExecutable */);
250 AssertRC(rc);
251 if (RT_FAILURE(rc))
252 return rc;
253
254 pVM->hm.s.vmx.pScratch = (uint8_t *)RTR0MemObjAddress(pVM->hm.s.vmx.hMemObjScratch);
255 pVM->hm.s.vmx.pScratchPhys = RTR0MemObjGetPagePhysAddr(pVM->hm.s.vmx.hMemObjScratch, 0);
256
257 ASMMemZero32(pVM->hm.s.vmx.pbScratch, PAGE_SIZE);
258 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
259 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xDEADBEEFDEADBEEF);
260 }
261#endif
262
263 /* Allocate VMCSs for all guest CPUs. */
264 for (VMCPUID i = 0; i < pVM->cCpus; i++)
265 {
266 PVMCPU pVCpu = &pVM->aCpus[i];
267
268 pVCpu->hm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ;
269
270 /* Allocate one page for the VM control structure (VMCS). */
271 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjVMCS, PAGE_SIZE, false /* fExecutable */);
272 AssertRC(rc);
273 if (RT_FAILURE(rc))
274 return rc;
275
276 pVCpu->hm.s.vmx.pvVMCS = RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjVMCS);
277 pVCpu->hm.s.vmx.HCPhysVMCS = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjVMCS, 0);
278 ASMMemZeroPage(pVCpu->hm.s.vmx.pvVMCS);
279
280 pVCpu->hm.s.vmx.cr0_mask = 0;
281 pVCpu->hm.s.vmx.cr4_mask = 0;
282
283 /* Allocate one page for the virtual APIC page for TPR caching. */
284 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjVirtApic, PAGE_SIZE, false /* fExecutable */);
285 AssertRC(rc);
286 if (RT_FAILURE(rc))
287 return rc;
288
289 pVCpu->hm.s.vmx.pbVirtApic = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjVirtApic);
290 pVCpu->hm.s.vmx.HCPhysVirtApic = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjVirtApic, 0);
291 ASMMemZeroPage(pVCpu->hm.s.vmx.pbVirtApic);
292
293 /* Allocate the MSR bitmap if this feature is supported. */
294 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
295 {
296 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, PAGE_SIZE, false /* fExecutable */);
297 AssertRC(rc);
298 if (RT_FAILURE(rc))
299 return rc;
300
301 pVCpu->hm.s.vmx.pvMsrBitmap = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjMsrBitmap);
302 pVCpu->hm.s.vmx.HCPhysMsrBitmap = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjMsrBitmap, 0);
303 memset(pVCpu->hm.s.vmx.pvMsrBitmap, 0xff, PAGE_SIZE);
304 }
305
306#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
307 /* Allocate one page for the guest MSR load area (for preloading guest MSRs during the world switch). */
308 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjGuestMsr, PAGE_SIZE, false /* fExecutable */);
309 AssertRC(rc);
310 if (RT_FAILURE(rc))
311 return rc;
312
313 pVCpu->hm.s.vmx.pvGuestMsr = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjGuestMsr);
314 pVCpu->hm.s.vmx.HCPhysGuestMsr = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjGuestMsr, 0);
315 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf));
316 memset(pVCpu->hm.s.vmx.pvGuestMsr, 0, PAGE_SIZE);
317
318 /* Allocate one page for the host MSR load area (for restoring host MSRs after the world switch back). */
319 rc = RTR0MemObjAllocCont(&pVCpu->hm.s.vmx.hMemObjHostMsr, PAGE_SIZE, false /* fExecutable */);
320 AssertRC(rc);
321 if (RT_FAILURE(rc))
322 return rc;
323
324 pVCpu->hm.s.vmx.pvHostMsr = (uint8_t *)RTR0MemObjAddress(pVCpu->hm.s.vmx.hMemObjHostMsr);
325 pVCpu->hm.s.vmx.HCPhysHostMsr = RTR0MemObjGetPagePhysAddr(pVCpu->hm.s.vmx.hMemObjHostMsr, 0);
326 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf));
327 memset(pVCpu->hm.s.vmx.pvHostMsr, 0, PAGE_SIZE);
328#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
329
330 /* Current guest paging mode. */
331 pVCpu->hm.s.vmx.enmLastSeenGuestMode = PGMMODE_REAL;
332
333#ifdef LOG_ENABLED
334 SUPR0Printf("VMXR0InitVM %x VMCS=%x (%x)\n", pVM, pVCpu->hm.s.vmx.pvVMCS, (uint32_t)pVCpu->hm.s.vmx.HCPhysVMCS);
335#endif
336 }
337
338 return VINF_SUCCESS;
339}
340
341
342/**
343 * Does Ring-0 per VM VT-x termination.
344 *
345 * @returns VBox status code.
346 * @param pVM Pointer to the VM.
347 */
348VMMR0DECL(int) VMXR0TermVM(PVM pVM)
349{
350 for (VMCPUID i = 0; i < pVM->cCpus; i++)
351 {
352 PVMCPU pVCpu = &pVM->aCpus[i];
353
354 if (pVCpu->hm.s.vmx.hMemObjVMCS != NIL_RTR0MEMOBJ)
355 {
356 RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjVMCS, false);
357 pVCpu->hm.s.vmx.hMemObjVMCS = NIL_RTR0MEMOBJ;
358 pVCpu->hm.s.vmx.pvVMCS = 0;
359 pVCpu->hm.s.vmx.HCPhysVMCS = 0;
360 }
361 if (pVCpu->hm.s.vmx.hMemObjVirtApic != NIL_RTR0MEMOBJ)
362 {
363 RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjVirtApic, false);
364 pVCpu->hm.s.vmx.hMemObjVirtApic = NIL_RTR0MEMOBJ;
365 pVCpu->hm.s.vmx.pbVirtApic = 0;
366 pVCpu->hm.s.vmx.HCPhysVirtApic = 0;
367 }
368 if (pVCpu->hm.s.vmx.hMemObjMsrBitmap != NIL_RTR0MEMOBJ)
369 {
370 RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjMsrBitmap, false);
371 pVCpu->hm.s.vmx.hMemObjMsrBitmap = NIL_RTR0MEMOBJ;
372 pVCpu->hm.s.vmx.pvMsrBitmap = 0;
373 pVCpu->hm.s.vmx.HCPhysMsrBitmap = 0;
374 }
375#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
376 if (pVCpu->hm.s.vmx.hMemObjHostMsr != NIL_RTR0MEMOBJ)
377 {
378 RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjHostMsr, false);
379 pVCpu->hm.s.vmx.hMemObjHostMsr = NIL_RTR0MEMOBJ;
380 pVCpu->hm.s.vmx.pvHostMsr = 0;
381 pVCpu->hm.s.vmx.HCPhysHostMsr = 0;
382 }
383 if (pVCpu->hm.s.vmx.hMemObjGuestMsr != NIL_RTR0MEMOBJ)
384 {
385 RTR0MemObjFree(pVCpu->hm.s.vmx.hMemObjGuestMsr, false);
386 pVCpu->hm.s.vmx.hMemObjGuestMsr = NIL_RTR0MEMOBJ;
387 pVCpu->hm.s.vmx.pvGuestMsr = 0;
388 pVCpu->hm.s.vmx.HCPhysGuestMsr = 0;
389 }
390#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
391 }
392 if (pVM->hm.s.vmx.hMemObjApicAccess != NIL_RTR0MEMOBJ)
393 {
394 RTR0MemObjFree(pVM->hm.s.vmx.hMemObjApicAccess, false);
395 pVM->hm.s.vmx.hMemObjApicAccess = NIL_RTR0MEMOBJ;
396 pVM->hm.s.vmx.pbApicAccess = 0;
397 pVM->hm.s.vmx.HCPhysApicAccess = 0;
398 }
399#ifdef VBOX_WITH_CRASHDUMP_MAGIC
400 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
401 {
402 ASMMemZero32(pVM->hm.s.vmx.pScratch, PAGE_SIZE);
403 RTR0MemObjFree(pVM->hm.s.vmx.hMemObjScratch, false);
404 pVM->hm.s.vmx.hMemObjScratch = NIL_RTR0MEMOBJ;
405 pVM->hm.s.vmx.pScratch = 0;
406 pVM->hm.s.vmx.pScratchPhys = 0;
407 }
408#endif
409 return VINF_SUCCESS;
410}
411
412
413/**
414 * Sets up VT-x for the specified VM.
415 *
416 * @returns VBox status code.
417 * @param pVM Pointer to the VM.
418 */
419VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
420{
421 int rc = VINF_SUCCESS;
422 uint32_t val;
423
424 AssertReturn(pVM, VERR_INVALID_PARAMETER);
425
426 /* Initialize these always, see hmR3InitFinalizeR0(). */
427 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NONE;
428 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NONE;
429
430 /* Determine optimal flush type for EPT. */
431 if (pVM->hm.s.fNestedPaging)
432 {
433 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
434 {
435 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
436 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
437 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
438 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
439 else
440 {
441 /*
442 * Should never really happen. EPT is supported but no suitable flush types are supported.
443 * We cannot ignore EPT at this point as we've already set up Unrestricted Guest execution.
444 */
445 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
446 return VERR_VMX_GENERIC;
447 }
448 }
449 else
450 {
451 /*
452 * Should never really happen. EPT is supported but INVEPT instruction is not supported.
453 */
454 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
455 return VERR_VMX_GENERIC;
456 }
457 }
458
459 /* Determine optimal flush type for VPID. */
460 if (pVM->hm.s.vmx.fVpid)
461 {
462 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
463 {
464 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
465 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT;
466 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
467 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS;
468 else
469 {
470 /*
471 * Neither SINGLE nor ALL context flush types for VPID supported by the CPU.
472 * We do not handle other flush type combinations, ignore VPID capabilities.
473 */
474 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
475 Log(("VMXR0SetupVM: Only VMX_FLUSH_VPID_INDIV_ADDR supported. Ignoring VPID.\n"));
476 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
477 Log(("VMXR0SetupVM: Only VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
478 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
479 pVM->hm.s.vmx.fVpid = false;
480 }
481 }
482 else
483 {
484 /*
485 * Should not really happen. VPID is supported but INVVPID is not supported.
486 * Ignore VPID capabilities as our code relies on using INVVPID for selective flushing.
487 */
488 Log(("VMXR0SetupVM: VPID supported without INVEPT support. Ignoring VPID.\n"));
489 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
490 pVM->hm.s.vmx.fVpid = false;
491 }
492 }
493
494 for (VMCPUID i = 0; i < pVM->cCpus; i++)
495 {
496 PVMCPU pVCpu = &pVM->aCpus[i];
497
498 AssertPtr(pVCpu->hm.s.vmx.pvVMCS);
499
500 /* Set revision dword at the beginning of the VMCS structure. */
501 *(uint32_t *)pVCpu->hm.s.vmx.pvVMCS = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
502
503 /*
504 * Clear and activate the VMCS.
505 */
506 Log(("HCPhysVMCS = %RHp\n", pVCpu->hm.s.vmx.HCPhysVMCS));
507 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
508 if (RT_FAILURE(rc))
509 goto vmx_end;
510
511 rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
512 if (RT_FAILURE(rc))
513 goto vmx_end;
514
515 /*
516 * VMX_VMCS_CTRL_PIN_EXEC_CONTROLS
517 * Set required bits to one and zero according to the MSR capabilities.
518 */
519 val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0;
520 val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_EXT_INT_EXIT /* External interrupts */
521 | VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_NMI_EXIT; /* Non-maskable interrupts */
522
523 /*
524 * Enable the VMX preemption timer.
525 */
526 if (pVM->hm.s.vmx.fUsePreemptTimer)
527 val |= VMX_VMCS_CTRL_PIN_EXEC_CONTROLS_PREEMPT_TIMER;
528 val &= pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1;
529
530 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, val);
531 AssertRC(rc);
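/* Editor's note (illustrative sketch, not part of the original sources): the
 * "set required bits to one and zero according to the MSR capabilities" pattern used for
 * the pin-based controls above is repeated below for the processor-based, secondary,
 * entry and exit controls. With hypothetical names it boils down to:
 */
#if 0
static uint32_t vmxAdjustControls(uint32_t fDesired, uint32_t fMustBeOne, uint32_t fMayBeOne)
{
    /* disallowed0 = bits the CPU requires to be 1; allowed1 = bits the CPU permits to be 1. */
    return (fDesired | fMustBeOne) & fMayBeOne;
}
/* e.g.: val = vmxAdjustControls(val, msr.vmx_pin_ctls.n.disallowed0, msr.vmx_pin_ctls.n.allowed1); */
#endif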
532
533 /*
534 * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS
535 * Set required bits to one and zero according to the MSR capabilities.
536 */
537 val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0;
538 /* Program which events cause VM-exits and which features we want to use. */
539 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_HLT_EXIT
540 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_TSC_OFFSET
541 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT
542 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_UNCOND_IO_EXIT
543 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDPMC_EXIT
544 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_EXIT
545 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT; /* don't execute mwait or else we'll idle inside
546 the guest (host thinks the cpu load is high) */
547
548 /* Without nested paging we should intercept invlpg and cr3 mov instructions. */
549 if (!pVM->hm.s.fNestedPaging)
550 {
551 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
552 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
553 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
554 }
555
556 /*
557 * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MWAIT_EXIT might cause a vmlaunch
558 * failure with an invalid control fields error. (combined with some other exit reasons)
559 */
560 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
561 {
562 /* CR8 reads from the APIC shadow page; writes cause an exit if they lower the TPR below the threshold. */
563 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW;
564 Assert(pVM->hm.s.vmx.pbApicAccess);
565 }
566 else
567 /* Exit on CR8 reads & writes in case the TPR shadow feature isn't present. */
568 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_STORE_EXIT | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR8_LOAD_EXIT;
569
570 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
571 {
572 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
573 val |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS;
574 }
575
576 /* We will use the secondary control if it's present. */
577 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
578
579 /* Mask away the bits that the CPU doesn't support */
580 /** @todo make sure they don't conflict with the above requirements. */
581 val &= pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1;
582 pVCpu->hm.s.vmx.u32ProcCtls = val;
583
584 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, val);
585 AssertRC(rc);
586
587 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
588 {
589 /*
590 * VMX_VMCS_CTRL_PROC_EXEC_CONTROLS2
591 * Set required bits to one and zero according to the MSR capabilities.
592 */
593 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0;
594 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT;
595
596 if (pVM->hm.s.fNestedPaging)
597 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT;
598
599 if (pVM->hm.s.vmx.fVpid)
600 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID;
601
602 if (pVM->hm.s.fHasIoApic)
603 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC;
604
605 if (pVM->hm.s.vmx.fUnrestrictedGuest)
606 val |= VMX_VMCS_CTRL_PROC_EXEC2_REAL_MODE;
607
608 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
609 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP;
610
611 /* Mask away the bits that the CPU doesn't support */
612 /** @todo make sure they don't conflict with the above requirements. */
613 val &= pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1;
614 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
615 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS2, val);
616 AssertRC(rc);
617 }
618
619 /*
620 * VMX_VMCS_CTRL_CR3_TARGET_COUNT
621 * Set required bits to one and zero according to the MSR capabilities.
622 */
623 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0);
624 AssertRC(rc);
625
626 /*
627 * Forward all exceptions except #NM & #PF to the guest.
628 * We always need to check page faults since our shadow page table can be out of sync.
629 * And we always lazily sync the FPU & XMM state.
630 */
631
632 /** @todo Possible optimization:
633 * Keep the FPU and XMM state current in the EM thread. That way there's no need to
634 * lazily sync anything, but the downside is that we can't use the FPU stack or XMM
635 * registers ourselves of course.
636 *
637 * Note: only possible if the current state is actually ours (X86_CR0_TS flag)
638 */
639
640 /*
641 * Don't filter page faults, all of them should cause a world switch.
642 */
643 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0);
644 AssertRC(rc);
645 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0);
646 AssertRC(rc);
647
648 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0);
649 AssertRC(rc);
650 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0);
651 AssertRC(rc);
652 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0);
653 AssertRC(rc);
654
655 /*
656 * Set the MSR bitmap address.
657 */
658 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_MSR_BITMAPS)
659 {
660 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
661
662 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
663 AssertRC(rc);
664
665 /*
666 * Allow the guest to directly modify these MSRs; they are loaded/stored automatically
667 * using MSR-load/store areas in the VMCS.
668 */
669 hmR0VmxSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_CS, true, true);
670 hmR0VmxSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_ESP, true, true);
671 hmR0VmxSetMSRPermission(pVCpu, MSR_IA32_SYSENTER_EIP, true, true);
672 hmR0VmxSetMSRPermission(pVCpu, MSR_K8_LSTAR, true, true);
673 hmR0VmxSetMSRPermission(pVCpu, MSR_K6_STAR, true, true);
674 hmR0VmxSetMSRPermission(pVCpu, MSR_K8_SF_MASK, true, true);
675 hmR0VmxSetMSRPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, true, true);
676 hmR0VmxSetMSRPermission(pVCpu, MSR_K8_GS_BASE, true, true);
677 hmR0VmxSetMSRPermission(pVCpu, MSR_K8_FS_BASE, true, true);
678 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
679 hmR0VmxSetMSRPermission(pVCpu, MSR_K8_TSC_AUX, true, true);
680 }
681
682#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
683 /*
684 * Set the guest & host MSR load/store physical addresses.
685 */
686 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
687 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
688 AssertRC(rc);
689 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
690 AssertRC(rc);
691 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
692 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
693 AssertRC(rc);
694#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
695
696 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0);
697 AssertRC(rc);
698 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0);
699 AssertRC(rc);
700 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0);
701 AssertRC(rc);
702
703 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW)
704 {
705 Assert(pVM->hm.s.vmx.hMemObjApicAccess);
706 /* Optional */
707 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
708 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
709
710 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
711 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
712
713 AssertRC(rc);
714 }
715
716 /* Set link pointer to -1. Not currently used. */
717 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, 0xFFFFFFFFFFFFFFFFULL);
718 AssertRC(rc);
719
720 /*
721 * Clear the VMCS, marking it inactive. This clears implementation-specific data and writes
722 * the VMCS data back to memory.
723 */
724 rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
725 AssertRC(rc);
726
727 /*
728 * Configure the VMCS read cache.
729 */
730 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
731
732 VMXSetupCachedReadVmcs(pCache, VMX_VMCS_GUEST_RIP);
733 VMXSetupCachedReadVmcs(pCache, VMX_VMCS_GUEST_RSP);
734 VMXSetupCachedReadVmcs(pCache, VMX_VMCS_GUEST_RFLAGS);
735 VMXSetupCachedReadVmcs(pCache, VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE);
736 VMXSetupCachedReadVmcs(pCache, VMX_VMCS_CTRL_CR0_READ_SHADOW);
737 VMXSetupCachedReadVmcs(pCache, VMX_VMCS_GUEST_CR0);
738 VMXSetupCachedReadVmcs(pCache, VMX_VMCS_CTRL_CR4_READ_SHADOW);
739 VMXSetupCachedReadVmcs(pCache, VMX_VMCS_GUEST_CR4);
740 VMXSetupCachedReadVmcs(pCache, VMX_VMCS_GUEST_DR7);
741 VMXSetupCachedReadVmcs(pCache, VMX_VMCS32_GUEST_SYSENTER_CS);
742 VMXSetupCachedReadVmcs(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
743 VMXSetupCachedReadVmcs(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
744 VMXSetupCachedReadVmcs(pCache, VMX_VMCS32_GUEST_GDTR_LIMIT);
745 VMXSetupCachedReadVmcs(pCache, VMX_VMCS_GUEST_GDTR_BASE);
746 VMXSetupCachedReadVmcs(pCache, VMX_VMCS32_GUEST_IDTR_LIMIT);
747 VMXSetupCachedReadVmcs(pCache, VMX_VMCS_GUEST_IDTR_BASE);
748
749 VMX_SETUP_SELREG(ES, pCache);
750 VMX_SETUP_SELREG(SS, pCache);
751 VMX_SETUP_SELREG(CS, pCache);
752 VMX_SETUP_SELREG(DS, pCache);
753 VMX_SETUP_SELREG(FS, pCache);
754 VMX_SETUP_SELREG(GS, pCache);
755 VMX_SETUP_SELREG(LDTR, pCache);
756 VMX_SETUP_SELREG(TR, pCache);
757
758 /*
759 * Status code VMCS reads.
760 */
761 VMXSetupCachedReadVmcs(pCache, VMX_VMCS32_RO_EXIT_REASON);
762 VMXSetupCachedReadVmcs(pCache, VMX_VMCS32_RO_VM_INSTR_ERROR);
763 VMXSetupCachedReadVmcs(pCache, VMX_VMCS32_RO_EXIT_INSTR_LENGTH);
764 VMXSetupCachedReadVmcs(pCache, VMX_VMCS32_RO_EXIT_INTERRUPTION_ERRCODE);
765 VMXSetupCachedReadVmcs(pCache, VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO);
766 VMXSetupCachedReadVmcs(pCache, VMX_VMCS32_RO_EXIT_INSTR_INFO);
767 VMXSetupCachedReadVmcs(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
768 VMXSetupCachedReadVmcs(pCache, VMX_VMCS32_RO_IDT_INFO);
769 VMXSetupCachedReadVmcs(pCache, VMX_VMCS32_RO_IDT_ERRCODE);
770
771 if (pVM->hm.s.fNestedPaging)
772 {
773 VMXSetupCachedReadVmcs(pCache, VMX_VMCS_GUEST_CR3);
774 VMXSetupCachedReadVmcs(pCache, VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL);
775 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
776 }
777 else
778 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
779 } /* for each VMCPU */
780
781 /*
782 * Setup the right TLB function based on CPU capabilities.
783 */
784 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
785 pVM->hm.s.vmx.pfnFlushTaggedTlb = hmR0VmxSetupTLBBoth;
786 else if (pVM->hm.s.fNestedPaging)
787 pVM->hm.s.vmx.pfnFlushTaggedTlb = hmR0VmxSetupTLBEPT;
788 else if (pVM->hm.s.vmx.fVpid)
789 pVM->hm.s.vmx.pfnFlushTaggedTlb = hmR0VmxSetupTLBVPID;
790 else
791 pVM->hm.s.vmx.pfnFlushTaggedTlb = hmR0VmxSetupTLBDummy;
792
793vmx_end:
794 hmR0VmxCheckError(pVM, &pVM->aCpus[0], rc);
795 return rc;
796}
797
798
799/**
800 * Sets the permission bits for the specified MSR.
801 *
802 * @param pVCpu Pointer to the VMCPU.
803 * @param ulMSR The MSR value.
804 * @param fRead Whether reading is allowed.
805 * @param fWrite Whether writing is allowed.
806 */
807static void hmR0VmxSetMSRPermission(PVMCPU pVCpu, unsigned ulMSR, bool fRead, bool fWrite)
808{
809 unsigned ulBit;
810 uint8_t *pvMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
811
812 /*
813 * Layout:
814 * 0x000 - 0x3ff - Low MSR read bits
815 * 0x400 - 0x7ff - High MSR read bits
816 * 0x800 - 0xbff - Low MSR write bits
817 * 0xc00 - 0xfff - High MSR write bits
818 */
819 if (ulMSR <= 0x00001FFF)
820 {
821 /* Pentium-compatible MSRs */
822 ulBit = ulMSR;
823 }
824 else if ( ulMSR >= 0xC0000000
825 && ulMSR <= 0xC0001FFF)
826 {
827 /* AMD Sixth Generation x86 Processor MSRs */
828 ulBit = (ulMSR - 0xC0000000);
829 pvMsrBitmap += 0x400;
830 }
831 else
832 {
833 AssertFailed();
834 return;
835 }
836
837 Assert(ulBit <= 0x1fff);
838 if (fRead)
839 ASMBitClear(pvMsrBitmap, ulBit);
840 else
841 ASMBitSet(pvMsrBitmap, ulBit);
842
843 if (fWrite)
844 ASMBitClear(pvMsrBitmap + 0x800, ulBit);
845 else
846 ASMBitSet(pvMsrBitmap + 0x800, ulBit);
847}
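/* Editor's note (illustrative sketch, not part of the original sources): a worked example of
 * the bitmap layout documented in hmR0VmxSetMSRPermission. For MSR_K8_LSTAR (0xC0000082)
 * the bit index is 0x82 in the "high" MSR range, so its read-permission bit lives in the
 * region at offset 0x400 and its write-permission bit in the region at 0x400 + 0x800 = 0xC00.
 */
#if 0
/* Returns false for MSRs outside the two ranges covered by the 4 KB bitmap. */
static bool vmxMsrBitmapLocate(uint32_t uMsr, uint32_t *poffRead, uint32_t *poffWrite, uint32_t *piBit)
{
    if (uMsr <= UINT32_C(0x00001FFF))
    {
        *piBit     = uMsr;
        *poffRead  = 0x000;     /* low MSR read bits   */
        *poffWrite = 0x800;     /* low MSR write bits  */
        return true;
    }
    if (uMsr >= UINT32_C(0xC0000000) && uMsr <= UINT32_C(0xC0001FFF))
    {
        *piBit     = uMsr - UINT32_C(0xC0000000);
        *poffRead  = 0x400;     /* high MSR read bits  */
        *poffWrite = 0xC00;     /* high MSR write bits */
        return true;
    }
    return false;
}
#endif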
848
849
850/**
851 * Injects an event (trap or external interrupt).
852 *
853 * @returns VBox status code. Note that it may return VINF_EM_RESET to
854 * indicate a triple fault when injecting X86_XCPT_DF.
855 *
856 * @param pVM Pointer to the VM.
857 * @param pVCpu Pointer to the VMCPU.
858 * @param pCtx Pointer to the guest CPU Context.
859 * @param intInfo VMX interrupt info.
860 * @param cbInstr Opcode length of faulting instruction.
861 * @param errCode Error code (optional).
862 */
863static int hmR0VmxInjectEvent(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, uint32_t intInfo, uint32_t cbInstr, uint32_t errCode)
864{
865 int rc;
866 uint32_t iGate = VMX_EXIT_INTERRUPTION_INFO_VECTOR(intInfo);
867
868#ifdef VBOX_WITH_STATISTICS
869 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[iGate & MASK_INJECT_IRQ_STAT]);
870#endif
871
872#ifdef VBOX_STRICT
873 if (iGate == 0xE)
874 {
875 LogFlow(("hmR0VmxInjectEvent: Injecting interrupt %d at %RGv error code=%08x CR2=%RGv intInfo=%08x\n", iGate,
876 (RTGCPTR)pCtx->rip, errCode, pCtx->cr2, intInfo));
877 }
878 else if (iGate < 0x20)
879 {
880 LogFlow(("hmR0VmxInjectEvent: Injecting interrupt %d at %RGv error code=%08x\n", iGate, (RTGCPTR)pCtx->rip,
881 errCode));
882 }
883 else
884 {
885 LogFlow(("INJ-EI: %x at %RGv\n", iGate, (RTGCPTR)pCtx->rip));
886 Assert( VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT
887 || !VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
888 Assert( VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT
889 || pCtx->eflags.u32 & X86_EFL_IF);
890 }
891#endif
892
893 if ( CPUMIsGuestInRealModeEx(pCtx)
894 && pVM->hm.s.vmx.pRealModeTSS)
895 {
896 RTGCPHYS GCPhysHandler;
897 uint16_t offset, ip;
898 RTSEL sel;
899
900 /*
901 * Injecting events doesn't work right with real mode emulation.
902 * (#GP if we try to inject external hardware interrupts)
903 * Inject the interrupt or trap directly instead.
904 *
905 * ASSUMES no access handlers for the bits we read or write below (should be safe).
906 */
907 Log(("Manual interrupt/trap '%x' inject (real mode)\n", iGate));
908
909 /*
910 * Check if the interrupt handler is present.
911 */
912 if (iGate * 4 + 3 > pCtx->idtr.cbIdt)
913 {
914 Log(("IDT cbIdt violation\n"));
915 if (iGate != X86_XCPT_DF)
916 {
917 uint32_t intInfo2;
918
919 intInfo2 = (iGate == X86_XCPT_GP) ? (uint32_t)X86_XCPT_DF : iGate;
920 intInfo2 |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
921 intInfo2 |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
922 intInfo2 |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
923
924 return hmR0VmxInjectEvent(pVM, pVCpu, pCtx, intInfo2, 0, 0 /* no error code according to the Intel docs */);
925 }
926 Log(("Triple fault -> reset the VM!\n"));
927 return VINF_EM_RESET;
928 }
929 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT
930 || iGate == 3 /* Both #BP and #OF point to the instruction after. */
931 || iGate == 4)
932 {
933 ip = pCtx->ip + cbInstr;
934 }
935 else
936 ip = pCtx->ip;
937
938 /*
939 * Read the selector:offset pair of the interrupt handler.
940 */
941 GCPhysHandler = (RTGCPHYS)pCtx->idtr.pIdt + iGate * 4;
942 rc = PGMPhysSimpleReadGCPhys(pVM, &offset, GCPhysHandler, sizeof(offset)); AssertRC(rc);
943 rc = PGMPhysSimpleReadGCPhys(pVM, &sel, GCPhysHandler + 2, sizeof(sel)); AssertRC(rc);
944
945 LogFlow(("IDT handler %04X:%04X\n", sel, offset));
946
947 /*
948 * Construct the stack frame.
949 */
950 /** @todo Check stack limit. */
951 pCtx->sp -= 2;
952 LogFlow(("ss:sp %04X:%04X eflags=%x\n", pCtx->ss.Sel, pCtx->sp, pCtx->eflags.u));
953 rc = PGMPhysSimpleWriteGCPhys(pVM, pCtx->ss.u64Base + pCtx->sp, &pCtx->eflags, sizeof(uint16_t)); AssertRC(rc);
954 pCtx->sp -= 2;
955 LogFlow(("ss:sp %04X:%04X cs=%x\n", pCtx->ss.Sel, pCtx->sp, pCtx->cs.Sel));
956 rc = PGMPhysSimpleWriteGCPhys(pVM, pCtx->ss.u64Base + pCtx->sp, &pCtx->cs, sizeof(uint16_t)); AssertRC(rc);
957 pCtx->sp -= 2;
958 LogFlow(("ss:sp %04X:%04X ip=%x\n", pCtx->ss.Sel, pCtx->sp, ip));
959 rc = PGMPhysSimpleWriteGCPhys(pVM, pCtx->ss.u64Base + pCtx->sp, &ip, sizeof(ip)); AssertRC(rc);
960
961 /*
962 * Update the CPU state for executing the handler.
963 */
964 pCtx->rip = offset;
965 pCtx->cs.Sel = sel;
966 pCtx->cs.u64Base = sel << 4;
967 pCtx->eflags.u &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
968
969 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS;
970 return VINF_SUCCESS;
971 }
972
973 /*
974 * Set event injection state.
975 */
976 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, intInfo | (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT));
977 rc |= VMXWriteVmcs(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
978 rc |= VMXWriteVmcs(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, errCode);
979
980 AssertRC(rc);
981 return rc;
982}
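/* Editor's note (illustrative sketch, not part of the original sources): the 32-bit
 * interruption-information field written to VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO above
 * is laid out as vector in bits 7:0, type in bits 10:8, "deliver error code" in bit 11 and
 * "valid" in bit 31 (per the Intel SDM), which is what the shift macros in this file encode.
 */
#if 0
static uint32_t vmxMakeIntInfo(uint8_t uVector, uint32_t uType, bool fDeliverErrCode)
{
    uint32_t u = uVector;                       /* bits 7:0  - vector             */
    u |= (uType & UINT32_C(0x7)) << 8;          /* bits 10:8 - interruption type  */
    if (fDeliverErrCode)
        u |= RT_BIT(11);                        /* bit 11    - deliver error code */
    u |= RT_BIT(31);                            /* bit 31    - valid              */
    return u;
}
#endif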
983
984
985/**
986 * Checks for pending guest interrupts and injects them.
987 *
988 * @returns VBox status code.
989 * @param pVM Pointer to the VM.
990 * @param pVCpu Pointer to the VMCPU.
991 * @param pCtx Pointer to the guest CPU context.
992 */
993static int hmR0VmxCheckPendingInterrupt(PVM pVM, PVMCPU pVCpu, CPUMCTX *pCtx)
994{
995 int rc;
996
997 /*
998 * Dispatch any pending interrupts (injected before, but a VM exit occurred prematurely).
999 */
1000 if (pVCpu->hm.s.Event.fPending)
1001 {
1002 Log(("CPU%d: Reinjecting event %RX64 %08x at %RGv cr2=%RX64\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntrInfo,
1003 pVCpu->hm.s.Event.u32ErrCode, (RTGCPTR)pCtx->rip, pCtx->cr2));
1004 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntReinject);
1005 rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, pVCpu->hm.s.Event.u64IntrInfo, 0, pVCpu->hm.s.Event.u32ErrCode);
1006 AssertRC(rc);
1007
1008 pVCpu->hm.s.Event.fPending = false;
1009 return VINF_SUCCESS;
1010 }
1011
1012 /*
1013 * If an active trap is already pending, we must forward it first!
1014 */
1015 if (!TRPMHasTrap(pVCpu))
1016 {
1017 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI))
1018 {
1019 RTGCUINTPTR intInfo;
1020
1021 Log(("CPU%d: injecting #NMI\n", pVCpu->idCpu));
1022
1023 intInfo = X86_XCPT_NMI;
1024 intInfo |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
1025 intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
1026
1027 rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, intInfo, 0, 0);
1028 AssertRC(rc);
1029
1030 return VINF_SUCCESS;
1031 }
1032
1033 /** @todo SMI interrupts. */
1034
1035 /*
1036 * When external interrupts are pending, we should exit the VM when IF is set.
1037 */
1038 if (VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)))
1039 {
1040 if (!(pCtx->eflags.u32 & X86_EFL_IF))
1041 {
1042 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT))
1043 {
1044 LogFlow(("Enable irq window exit!\n"));
1045 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
1046 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
1047 AssertRC(rc);
1048 }
1049 /* else nothing to do but wait */
1050 }
1051 else if (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1052 {
1053 uint8_t u8Interrupt;
1054
1055 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
1056 Log(("CPU%d: Dispatch interrupt: u8Interrupt=%x (%d) rc=%Rrc cs:rip=%04X:%RGv\n", pVCpu->idCpu,
1057 u8Interrupt, u8Interrupt, rc, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
1058 if (RT_SUCCESS(rc))
1059 {
1060 rc = TRPMAssertTrap(pVCpu, u8Interrupt, TRPM_HARDWARE_INT);
1061 AssertRC(rc);
1062 }
1063 else
1064 {
1065 /* Can only happen in rare cases where a pending interrupt is cleared behind our back */
1066 Assert(!VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)));
1067 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
1068 /* Just continue */
1069 }
1070 }
1071 else
1072 Log(("Pending interrupt blocked at %RGv by VM_FF_INHIBIT_INTERRUPTS!!\n", (RTGCPTR)pCtx->rip));
1073 }
1074 }
1075
1076#ifdef VBOX_STRICT
1077 if (TRPMHasTrap(pVCpu))
1078 {
1079 uint8_t u8Vector;
1080 rc = TRPMQueryTrapAll(pVCpu, &u8Vector, 0, 0, 0);
1081 AssertRC(rc);
1082 }
1083#endif
1084
1085 if ( (pCtx->eflags.u32 & X86_EFL_IF)
1086 && (!VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
1087 && TRPMHasTrap(pVCpu)
1088 )
1089 {
1090 uint8_t u8Vector;
1091 TRPMEVENT enmType;
1092 RTGCUINTPTR intInfo;
1093 RTGCUINT errCode;
1094
1095 /*
1096 * If a new event is pending, dispatch it now.
1097 */
1098 rc = TRPMQueryTrapAll(pVCpu, &u8Vector, &enmType, &errCode, 0);
1099 AssertRC(rc);
1100 Assert(pCtx->eflags.Bits.u1IF == 1 || enmType == TRPM_TRAP);
1101 Assert(enmType != TRPM_SOFTWARE_INT);
1102
1103 /*
1104 * Clear the pending trap.
1105 */
1106 rc = TRPMResetTrap(pVCpu);
1107 AssertRC(rc);
1108
1109 intInfo = u8Vector;
1110 intInfo |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
1111
1112 if (enmType == TRPM_TRAP)
1113 {
1114 switch (u8Vector)
1115 {
1116 case X86_XCPT_DF:
1117 case X86_XCPT_TS:
1118 case X86_XCPT_NP:
1119 case X86_XCPT_SS:
1120 case X86_XCPT_GP:
1121 case X86_XCPT_PF:
1122 case X86_XCPT_AC:
1123 {
1124 /* Valid error codes. */
1125 intInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
1126 break;
1127 }
1128
1129 default:
1130 break;
1131 }
1132
1133 if ( u8Vector == X86_XCPT_BP
1134 || u8Vector == X86_XCPT_OF)
1135 {
1136 intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
1137 }
1138 else
1139 intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
1140 }
1141 else
1142 intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
1143
1144 STAM_COUNTER_INC(&pVCpu->hm.s.StatIntInject);
1145 rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, intInfo, 0, errCode);
1146 AssertRC(rc);
1147 } /* if (interrupts can be dispatched) */
1148
1149 return VINF_SUCCESS;
1150}
1151
1152/**
1153 * Checks for pending VMX events and converts them to TRPM. Before we execute any instruction
1154 * outside of VMX, any pending VMX event must be converted so that it can be delivered properly.
1155 *
1156 * @returns VBox status code.
1157 * @param pVCpu Pointer to the VMCPU.
1158 */
1159static int hmR0VmxCheckPendingEvent(PVMCPU pVCpu)
1160{
1161 if (pVCpu->hm.s.Event.fPending)
1162 {
1163 TRPMEVENT enmTrapType;
1164
1165 /* If a trap was already pending, we did something wrong! */
1166 Assert((TRPMQueryTrap(pVCpu, NULL, NULL) == VERR_TRPM_NO_ACTIVE_TRAP));
1167
1168 /*
1169 * Clear the pending event and move it over to TRPM for the rest
1170 * of the world to see.
1171 */
1172 pVCpu->hm.s.Event.fPending = false;
1173 switch (VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo))
1174 {
1175 case VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT:
1176 case VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI:
1177 enmTrapType = TRPM_HARDWARE_INT;
1178 break;
1179 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT:
1180 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT:
1181 case VMX_EXIT_INTERRUPTION_INFO_TYPE_DB_XCPT:
1182 enmTrapType = TRPM_SOFTWARE_INT;
1183 break;
1184 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
1185 enmTrapType = TRPM_TRAP;
1186 break;
1187 default:
1188 enmTrapType = TRPM_32BIT_HACK; /* Can't get here. */
1189 AssertFailed();
1190 }
1191 TRPMAssertTrap(pVCpu, VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->hm.s.Event.u64IntrInfo), enmTrapType);
1192 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo))
1193 TRPMSetErrorCode(pVCpu, pVCpu->hm.s.Event.u32ErrCode);
1194 //@todo: Is there any situation where we need to call TRPMSetFaultAddress()?
1195 }
1196 return VINF_SUCCESS;
1197}
1198
1199/**
1200 * Save the host state into the VMCS.
1201 *
1202 * @returns VBox status code.
1203 * @param pVM Pointer to the VM.
1204 * @param pVCpu Pointer to the VMCPU.
1205 */
1206VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
1207{
1208 int rc = VINF_SUCCESS;
1209 NOREF(pVM);
1210
1211 /*
1212 * Host CPU Context.
1213 */
1214 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT)
1215 {
1216 RTIDTR idtr;
1217 RTGDTR gdtr;
1218 RTSEL SelTR;
1219 PCX86DESCHC pDesc;
1220 uintptr_t trBase;
1221 RTSEL cs;
1222 RTSEL ss;
1223 uint64_t cr3;
1224
1225 /*
1226 * Control registers.
1227 */
1228 rc = VMXWriteVmcs(VMX_VMCS_HOST_CR0, ASMGetCR0());
1229 Log2(("VMX_VMCS_HOST_CR0 %08x\n", ASMGetCR0()));
1230#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1231 if (VMX_IS_64BIT_HOST_MODE())
1232 {
1233 cr3 = hmR0Get64bitCR3();
1234 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_CR3, cr3);
1235 }
1236 else
1237#endif
1238 {
1239 cr3 = ASMGetCR3();
1240 rc |= VMXWriteVmcs(VMX_VMCS_HOST_CR3, cr3);
1241 }
1242 Log2(("VMX_VMCS_HOST_CR3 %08RX64\n", cr3));
1243 rc |= VMXWriteVmcs(VMX_VMCS_HOST_CR4, ASMGetCR4());
1244 Log2(("VMX_VMCS_HOST_CR4 %08x\n", ASMGetCR4()));
1245 AssertRC(rc);
1246
1247 /*
1248 * Selector registers.
1249 */
1250#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1251 if (VMX_IS_64BIT_HOST_MODE())
1252 {
1253 cs = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelCS;
1254 ss = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelSS;
1255 }
1256 else
1257 {
1258 /* sysenter loads LDT cs & ss, VMX doesn't like this. Load the GDT ones (safe). */
1259 cs = (RTSEL)(uintptr_t)&SUPR0AbsKernelCS;
1260 ss = (RTSEL)(uintptr_t)&SUPR0AbsKernelSS;
1261 }
1262#else
1263 cs = ASMGetCS();
1264 ss = ASMGetSS();
1265#endif
1266 Assert(!(cs & X86_SEL_LDT)); Assert((cs & X86_SEL_RPL) == 0);
1267 Assert(!(ss & X86_SEL_LDT)); Assert((ss & X86_SEL_RPL) == 0);
1268 rc = VMXWriteVmcs(VMX_VMCS16_HOST_FIELD_CS, cs);
1269 /* Note: VMX is (again) very picky about the RPL of the selectors here; we'll restore them manually. */
1270 rc |= VMXWriteVmcs(VMX_VMCS16_HOST_FIELD_DS, 0);
1271 rc |= VMXWriteVmcs(VMX_VMCS16_HOST_FIELD_ES, 0);
1272#if HC_ARCH_BITS == 32
1273 if (!VMX_IS_64BIT_HOST_MODE())
1274 {
1275 rc |= VMXWriteVmcs(VMX_VMCS16_HOST_FIELD_FS, 0);
1276 rc |= VMXWriteVmcs(VMX_VMCS16_HOST_FIELD_GS, 0);
1277 }
1278#endif
1279 rc |= VMXWriteVmcs(VMX_VMCS16_HOST_FIELD_SS, ss);
1280 SelTR = ASMGetTR();
1281 rc |= VMXWriteVmcs(VMX_VMCS16_HOST_FIELD_TR, SelTR);
1282 AssertRC(rc);
1283 Log2(("VMX_VMCS_HOST_FIELD_CS %08x (%08x)\n", cs, ASMGetSS()));
1284 Log2(("VMX_VMCS_HOST_FIELD_DS 00000000 (%08x)\n", ASMGetDS()));
1285 Log2(("VMX_VMCS_HOST_FIELD_ES 00000000 (%08x)\n", ASMGetES()));
1286 Log2(("VMX_VMCS_HOST_FIELD_FS 00000000 (%08x)\n", ASMGetFS()));
1287 Log2(("VMX_VMCS_HOST_FIELD_GS 00000000 (%08x)\n", ASMGetGS()));
1288 Log2(("VMX_VMCS_HOST_FIELD_SS %08x (%08x)\n", ss, ASMGetSS()));
1289 Log2(("VMX_VMCS_HOST_FIELD_TR %08x\n", ASMGetTR()));
1290
1291 /*
1292 * GDTR & IDTR.
1293 */
1294#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1295 if (VMX_IS_64BIT_HOST_MODE())
1296 {
1297 X86XDTR64 gdtr64, idtr64;
1298 hmR0Get64bitGdtrAndIdtr(&gdtr64, &idtr64);
1299 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, gdtr64.uAddr);
1300 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, idtr64.uAddr);
1301 AssertRC(rc);
1302 Log2(("VMX_VMCS_HOST_GDTR_BASE %RX64\n", gdtr64.uAddr));
1303 Log2(("VMX_VMCS_HOST_IDTR_BASE %RX64\n", idtr64.uAddr));
1304 gdtr.cbGdt = gdtr64.cb;
1305 gdtr.pGdt = (uintptr_t)gdtr64.uAddr;
1306 }
1307 else
1308#endif
1309 {
1310 ASMGetGDTR(&gdtr);
1311 rc = VMXWriteVmcs(VMX_VMCS_HOST_GDTR_BASE, gdtr.pGdt);
1312 ASMGetIDTR(&idtr);
1313 rc |= VMXWriteVmcs(VMX_VMCS_HOST_IDTR_BASE, idtr.pIdt);
1314 AssertRC(rc);
1315 Log2(("VMX_VMCS_HOST_GDTR_BASE %RHv\n", gdtr.pGdt));
1316 Log2(("VMX_VMCS_HOST_IDTR_BASE %RHv\n", idtr.pIdt));
1317 }
1318
1319 /*
1320 * Save the base address of the TR selector.
1321 */
1322 if (SelTR > gdtr.cbGdt)
1323 {
1324 AssertMsgFailed(("Invalid TR selector %x. GDTR.cbGdt=%x\n", SelTR, gdtr.cbGdt));
1325 return VERR_VMX_INVALID_HOST_STATE;
1326 }
1327
1328 pDesc = (PCX86DESCHC)(gdtr.pGdt + (SelTR & X86_SEL_MASK));
1329#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1330 if (VMX_IS_64BIT_HOST_MODE())
1331 {
1332 uint64_t trBase64 = X86DESC64_BASE((PX86DESC64)pDesc);
1333 rc = VMXWriteVmcs64(VMX_VMCS_HOST_TR_BASE, trBase64);
1334 Log2(("VMX_VMCS_HOST_TR_BASE %RX64\n", trBase64));
1335 AssertRC(rc);
1336 }
1337 else
1338#endif
1339 {
1340#if HC_ARCH_BITS == 64
1341 trBase = X86DESC64_BASE(pDesc);
1342#else
1343 trBase = X86DESC_BASE(pDesc);
1344#endif
1345 rc = VMXWriteVmcs(VMX_VMCS_HOST_TR_BASE, trBase);
1346 AssertRC(rc);
1347 Log2(("VMX_VMCS_HOST_TR_BASE %RHv\n", trBase));
1348 }
1349
1350 /*
1351 * FS base and GS base.
1352 */
1353#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1354 if (VMX_IS_64BIT_HOST_MODE())
1355 {
1356 Log2(("MSR_K8_FS_BASE = %RX64\n", ASMRdMsr(MSR_K8_FS_BASE)));
1357 Log2(("MSR_K8_GS_BASE = %RX64\n", ASMRdMsr(MSR_K8_GS_BASE)));
1358 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, ASMRdMsr(MSR_K8_FS_BASE));
1359 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, ASMRdMsr(MSR_K8_GS_BASE));
1360 }
1361#endif
1362 AssertRC(rc);
1363
1364 /*
1365 * Sysenter MSRs.
1366 */
1367 /** @todo expensive!! */
1368 rc = VMXWriteVmcs(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
1369 Log2(("VMX_VMCS_HOST_SYSENTER_CS %08x\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_CS)));
1370#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
1371 if (VMX_IS_64BIT_HOST_MODE())
1372 {
1373 Log2(("VMX_VMCS_HOST_SYSENTER_EIP %RX64\n", ASMRdMsr(MSR_IA32_SYSENTER_EIP)));
1374 Log2(("VMX_VMCS_HOST_SYSENTER_ESP %RX64\n", ASMRdMsr(MSR_IA32_SYSENTER_ESP)));
1375 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
1376 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
1377 }
1378 else
1379 {
1380 rc |= VMXWriteVmcs(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
1381 rc |= VMXWriteVmcs(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
1382 Log2(("VMX_VMCS_HOST_SYSENTER_EIP %RX32\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP)));
1383 Log2(("VMX_VMCS_HOST_SYSENTER_ESP %RX32\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP)));
1384 }
1385#elif HC_ARCH_BITS == 32
1386 rc |= VMXWriteVmcs(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
1387 rc |= VMXWriteVmcs(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
1388 Log2(("VMX_VMCS_HOST_SYSENTER_EIP %RX32\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP)));
1389 Log2(("VMX_VMCS_HOST_SYSENTER_ESP %RX32\n", ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP)));
1390#else
1391 Log2(("VMX_VMCS_HOST_SYSENTER_EIP %RX64\n", ASMRdMsr(MSR_IA32_SYSENTER_EIP)));
1392 Log2(("VMX_VMCS_HOST_SYSENTER_ESP %RX64\n", ASMRdMsr(MSR_IA32_SYSENTER_ESP)));
1393 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
1394 rc |= VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
1395#endif
1396 AssertRC(rc);
1397
1398
1399#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
1400 /*
1401 * Store all host MSRs in the VM-Exit load area, so they will be reloaded after
1402 * the world switch back to the host.
1403 */
1404 PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMsr;
1405 unsigned idxMsr = 0;
1406
1407 uint32_t u32HostExtFeatures = ASMCpuId_EDX(0x80000001);
1408 if (u32HostExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
1409 {
1410 pMsr->u32IndexMSR = MSR_K6_EFER;
1411 pMsr->u32Reserved = 0;
1412# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1413 if (CPUMIsGuestInLongMode(pVCpu))
1414 {
1415 /* Must match the EFER value in our 64 bits switcher. */
1416 pMsr->u64Value = ASMRdMsr(MSR_K6_EFER) | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
1417 }
1418 else
1419# endif
1420 pMsr->u64Value = ASMRdMsr(MSR_K6_EFER);
1421 pMsr++; idxMsr++;
1422 }
1423
1424# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1425 if (VMX_IS_64BIT_HOST_MODE())
1426 {
1427 pMsr->u32IndexMSR = MSR_K6_STAR;
1428 pMsr->u32Reserved = 0;
1429 pMsr->u64Value = ASMRdMsr(MSR_K6_STAR); /* legacy syscall eip, cs & ss */
1430 pMsr++; idxMsr++;
1431 pMsr->u32IndexMSR = MSR_K8_LSTAR;
1432 pMsr->u32Reserved = 0;
1433 pMsr->u64Value = ASMRdMsr(MSR_K8_LSTAR); /* 64 bits mode syscall rip */
1434 pMsr++; idxMsr++;
1435 pMsr->u32IndexMSR = MSR_K8_SF_MASK;
1436 pMsr->u32Reserved = 0;
1437 pMsr->u64Value = ASMRdMsr(MSR_K8_SF_MASK); /* syscall flag mask */
1438 pMsr++; idxMsr++;
1439
1440 /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
1441#if 0
1442 pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
1443 pMsr->u32Reserved = 0;
1444 pMsr->u64Value = ASMRdMsr(MSR_K8_KERNEL_GS_BASE); /* swapgs exchange value */
1445 pMsr++; idxMsr++;
1446#endif
1447 }
1448# endif
1449
1450 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
1451 {
1452 pMsr->u32IndexMSR = MSR_K8_TSC_AUX;
1453 pMsr->u32Reserved = 0;
1454 pMsr->u64Value = ASMRdMsr(MSR_K8_TSC_AUX);
1455 pMsr++; idxMsr++;
1456 }
1457
1458 /** @todo r=ramshankar: check IA32_VMX_MISC bits 27:25 for valid idxMsr
1459 * range. */
1460 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, idxMsr);
1461 AssertRC(rc);
1462#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
1463
1464 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
1465 }
1466 return rc;
1467}
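/* Editor's note (illustrative only, not part of the original sources): each entry written to
 * the auto MSR load/store areas above uses the 16-byte format defined by VT-x - a 32-bit MSR
 * index, 32 reserved bits and the 64-bit MSR value - and the number of entries is what ends
 * up in VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT. The real definition is the VMXMSR structure in
 * the VMX headers; a sketch of the layout:
 */
#if 0
typedef struct VMXMSRENTRYSKETCH
{
    uint32_t u32IndexMSR;   /* MSR number, e.g. MSR_K6_EFER */
    uint32_t u32Reserved;   /* must be zero                 */
    uint64_t u64Value;      /* value to load or store       */
} VMXMSRENTRYSKETCH;
#endif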
1468
1469
1470/**
1471 * Loads the 4 PDPEs into the guest state when nested paging is used and the
1472 * guest operates in PAE mode.
1473 *
1474 * @returns VBox status code.
1475 * @param pVCpu Pointer to the VMCPU.
1476 * @param pCtx Pointer to the guest CPU context.
1477 */
1478static int hmR0VmxLoadPaePdpes(PVMCPU pVCpu, PCPUMCTX pCtx)
1479{
1480 if (CPUMIsGuestInPAEModeEx(pCtx))
1481 {
1482 X86PDPE aPdpes[4];
1483 int rc = PGMGstGetPaePdpes(pVCpu, &aPdpes[0]);
1484 AssertRCReturn(rc, rc);
1485
1486 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, aPdpes[0].u); AssertRCReturn(rc, rc);
1487 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, aPdpes[1].u); AssertRCReturn(rc, rc);
1488 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, aPdpes[2].u); AssertRCReturn(rc, rc);
1489 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, aPdpes[3].u); AssertRCReturn(rc, rc);
1490 }
1491 return VINF_SUCCESS;
1492}
1493
1494
1495/**
1496 * Saves the 4 PDPEs into the guest state when nested paging is used and the
1497 * guest operates in PAE mode.
1498 *
1499 * @returns VBox status code.
1500 * @param pVCpu Pointer to the VM CPU.
1501 * @param pCtx Pointer to the guest CPU context.
1502 *
1503 * @remarks Tell PGM about CR3 changes before calling this helper.
1504 */
1505static int hmR0VmxSavePaePdpes(PVMCPU pVCpu, PCPUMCTX pCtx)
1506{
1507 if (CPUMIsGuestInPAEModeEx(pCtx))
1508 {
1509 int rc;
1510 X86PDPE aPdpes[4];
1511 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &aPdpes[0].u); AssertRCReturn(rc, rc);
1512 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &aPdpes[1].u); AssertRCReturn(rc, rc);
1513 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &aPdpes[2].u); AssertRCReturn(rc, rc);
1514 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &aPdpes[3].u); AssertRCReturn(rc, rc);
1515
1516 rc = PGMGstUpdatePaePdpes(pVCpu, &aPdpes[0]);
1517 AssertRCReturn(rc, rc);
1518 }
1519 return VINF_SUCCESS;
1520}
1521
1522
1523/**
1524 * Update the exception bitmap according to the current CPU state.
1525 *
1526 * @param pVM Pointer to the VM.
1527 * @param pVCpu Pointer to the VMCPU.
1528 * @param pCtx Pointer to the guest CPU context.
1529 */
1530static void hmR0VmxUpdateExceptionBitmap(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1531{
1532 uint32_t u32TrapMask;
1533 Assert(pCtx);
1534
1535 /*
1536 * Set up a mask for intercepting traps.
1537 */
1538 /** @todo Do we really need to always intercept #DB? */
1539 u32TrapMask = RT_BIT(X86_XCPT_DB)
1540 | RT_BIT(X86_XCPT_NM)
1541#ifdef VBOX_ALWAYS_TRAP_PF
1542 | RT_BIT(X86_XCPT_PF)
1543#endif
1544#ifdef VBOX_STRICT
1545 | RT_BIT(X86_XCPT_BP)
1546 | RT_BIT(X86_XCPT_DB)
1547 | RT_BIT(X86_XCPT_DE)
1548 | RT_BIT(X86_XCPT_NM)
1549 | RT_BIT(X86_XCPT_UD)
1550 | RT_BIT(X86_XCPT_NP)
1551 | RT_BIT(X86_XCPT_SS)
1552 | RT_BIT(X86_XCPT_GP)
1553 | RT_BIT(X86_XCPT_MF)
1554#endif
1555 ;
1556
1557 /*
1558 * Without nested paging, #PF must be intercepted to implement shadow paging.
1559 */
1560 /** @todo NP state won't change so maybe we should build the initial trap mask up front? */
1561 if (!pVM->hm.s.fNestedPaging)
1562 u32TrapMask |= RT_BIT(X86_XCPT_PF);
1563
1564 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
1565 if (!(pCtx->cr0 & X86_CR0_NE))
1566 u32TrapMask |= RT_BIT(X86_XCPT_MF);
1567
1568#ifdef VBOX_STRICT
1569 Assert(u32TrapMask & RT_BIT(X86_XCPT_GP));
1570#endif
1571
1572 /*
1573 * Intercept all exceptions in real mode as none of them can be injected directly (#GP otherwise).
1574 */
1575 /** @todo Despite the claim to intercept everything, with NP we do not intercept #PF. Should we? */
1576 if ( CPUMIsGuestInRealModeEx(pCtx)
1577 && pVM->hm.s.vmx.pRealModeTSS)
1578 {
1579 u32TrapMask |= RT_BIT(X86_XCPT_DE)
1580 | RT_BIT(X86_XCPT_DB)
1581 | RT_BIT(X86_XCPT_NMI)
1582 | RT_BIT(X86_XCPT_BP)
1583 | RT_BIT(X86_XCPT_OF)
1584 | RT_BIT(X86_XCPT_BR)
1585 | RT_BIT(X86_XCPT_UD)
1586 | RT_BIT(X86_XCPT_DF)
1587 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN)
1588 | RT_BIT(X86_XCPT_TS)
1589 | RT_BIT(X86_XCPT_NP)
1590 | RT_BIT(X86_XCPT_SS)
1591 | RT_BIT(X86_XCPT_GP)
1592 | RT_BIT(X86_XCPT_MF)
1593 | RT_BIT(X86_XCPT_AC)
1594 | RT_BIT(X86_XCPT_MC)
1595 | RT_BIT(X86_XCPT_XF)
1596 ;
1597 }
1598
1599 int rc = VMXWriteVmcs(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32TrapMask);
1600 AssertRC(rc);
1601}
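/* Editor's note (illustrative sketch, not part of the original sources): the exception bitmap
 * written above is a 32-bit VMCS field in which a set bit n causes exception vector n to
 * trigger a VM-exit, e.g.:
 */
#if 0
uint32_t const u32MinimalXcptBitmap = RT_BIT(X86_XCPT_DB)   /* vector 1: intercept #DB */
                                    | RT_BIT(X86_XCPT_NM);  /* vector 7: intercept #NM */
#endif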
1602
1603
1604/**
1605 * Loads a minimal guest state.
1606 *
1607 * NOTE: Don't do anything here that can cause a jump back to ring 3!!!!!
1608 *
1609 * @param pVM Pointer to the VM.
1610 * @param pVCpu Pointer to the VMCPU.
1611 * @param pCtx Pointer to the guest CPU context.
1612 */
1613VMMR0DECL(void) VMXR0LoadMinimalGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1614{
1615 int rc;
1616 X86EFLAGS eflags;
1617
1618 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST));
1619
1620 /*
1621 * Load EIP, ESP and EFLAGS.
1622 */
1623 rc = VMXWriteVmcs64(VMX_VMCS_GUEST_RIP, pCtx->rip);
1624 rc |= VMXWriteVmcs64(VMX_VMCS_GUEST_RSP, pCtx->rsp);
1625 AssertRC(rc);
1626
1627 /*
1628 * Bits 22-31, 15, 5 & 3 must be zero. Bit 1 must be 1.
1629 */
1630 eflags = pCtx->eflags;
1631 eflags.u32 &= VMX_EFLAGS_RESERVED_0;
1632 eflags.u32 |= VMX_EFLAGS_RESERVED_1;
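        /* Illustrative example (assuming the reserved masks match the comment above, i.e. bits 3, 5,
           15 and 22-31 cleared and bit 1 forced to 1): a guest value of 0x00400246 becomes 0x00000246
           here, keeping IF/ZF/PF while shaping the reserved bits the way the VM-entry checks demand. */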
1633
1634 /*
1635     * Check if we are emulating real mode using v86 mode.
1636 */
1637 if ( CPUMIsGuestInRealModeEx(pCtx)
1638 && pVM->hm.s.vmx.pRealModeTSS)
1639 {
1640 pVCpu->hm.s.vmx.RealMode.eflags = eflags;
1641
1642 eflags.Bits.u1VM = 1;
1643 eflags.Bits.u2IOPL = 0; /* must always be 0 or else certain instructions won't cause faults. */
1644 }
1645 rc = VMXWriteVmcs(VMX_VMCS_GUEST_RFLAGS, eflags.u32);
1646 AssertRC(rc);
1647}
1648
1649
1650/**
1651 * Loads the guest state.
1652 *
1653 * NOTE: Don't do anything here that can cause a jump back to ring 3!!!!!
1654 *
1655 * @returns VBox status code.
1656 * @param pVM Pointer to the VM.
1657 * @param pVCpu Pointer to the VMCPU.
1658 * @param pCtx Pointer to the guest CPU context.
1659 */
1660VMMR0DECL(int) VMXR0LoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
1661{
1662 int rc = VINF_SUCCESS;
1663 RTGCUINTPTR val;
1664
1665 /*
1666 * VMX_VMCS_CTRL_ENTRY_CONTROLS
1667 * Set required bits to one and zero according to the MSR capabilities.
1668 */
1669 val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0;
1670
1671 /*
1672 * Load guest debug controls (DR7 & IA32_DEBUGCTL_MSR).
1673     * Forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs as well.
1674 */
1675 val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_LOAD_DEBUG;
1676
1677 if (CPUMIsGuestInLongModeEx(pCtx))
1678 val |= VMX_VMCS_CTRL_ENTRY_CONTROLS_IA32E_MODE_GUEST;
1679     /* else: must be zero when AMD64 is not available. */
1680
1681 /*
1682 * Mask away the bits that the CPU doesn't support.
1683 */
1684 val &= pVM->hm.s.vmx.msr.vmx_entry.n.allowed1;
1685 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_ENTRY_CONTROLS, val);
1686 AssertRC(rc);
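        /* Note on the idiom used above and below for the VMX control fields; a sketch only, not tied
           to any particular MSR values:
               val  = msr.disallowed0;     // bits the CPU requires to be 1
               val |= FEATURES_WE_WANT;    // optional features (hypothetical placeholder)
               val &= msr.allowed1;        // drop anything the CPU doesn't support */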
1687
1688 /*
1689 * VMX_VMCS_CTRL_EXIT_CONTROLS
1690 * Set required bits to one and zero according to the MSR capabilities.
1691 */
1692 val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0;
1693
1694 /*
1695 * Save debug controls (DR7 & IA32_DEBUGCTL_MSR)
1696 * Forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs
1697     * Forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs as well.
1698 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_SAVE_DEBUG;
1699
1700#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
1701 if (VMX_IS_64BIT_HOST_MODE())
1702 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE;
1703     /* else: must be zero when AMD64 is not available. */
1704#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
1705 if (CPUMIsGuestInLongModeEx(pCtx))
1706 val |= VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE; /* our switcher goes to long mode */
1707 else
1708 Assert(!(val & VMX_VMCS_CTRL_EXIT_CONTROLS_HOST_ADDR_SPACE_SIZE));
1709#endif
1710 val &= pVM->hm.s.vmx.msr.vmx_exit.n.allowed1;
1711
1712 /*
1713 * Don't acknowledge external interrupts on VM-exit.
1714 */
1715 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_EXIT_CONTROLS, val);
1716 AssertRC(rc);
1717
1718 /*
1719 * Guest CPU context: ES, CS, SS, DS, FS, GS.
1720 */
1721 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
1722 {
1723 if (pVM->hm.s.vmx.pRealModeTSS)
1724 {
1725 PGMMODE enmGuestMode = PGMGetGuestMode(pVCpu);
1726 if (pVCpu->hm.s.vmx.enmLastSeenGuestMode != enmGuestMode)
1727 {
1728 /*
1729 * Correct weird requirements for switching to protected mode.
1730 */
1731 if ( pVCpu->hm.s.vmx.enmLastSeenGuestMode == PGMMODE_REAL
1732 && enmGuestMode >= PGMMODE_PROTECTED)
1733 {
1734#ifdef VBOX_WITH_REM
1735 /*
1736                     * Flush the recompiler code cache, as it's quite likely the guest will rewrite code
1737                     * that it will later execute in real mode (OpenBSD 4.0 is one such example).
1738 */
1739 REMFlushTBs(pVM);
1740#endif
1741
1742 /*
1743 * DPL of all hidden selector registers must match the current CPL (0).
1744 */
1745 pCtx->cs.Attr.n.u2Dpl = 0;
1746 pCtx->cs.Attr.n.u4Type = X86_SEL_TYPE_CODE | X86_SEL_TYPE_RW_ACC;
1747
1748 pCtx->ds.Attr.n.u2Dpl = 0;
1749 pCtx->es.Attr.n.u2Dpl = 0;
1750 pCtx->fs.Attr.n.u2Dpl = 0;
1751 pCtx->gs.Attr.n.u2Dpl = 0;
1752 pCtx->ss.Attr.n.u2Dpl = 0;
1753 }
1754 pVCpu->hm.s.vmx.enmLastSeenGuestMode = enmGuestMode;
1755 }
1756 }
1757
1758 VMX_WRITE_SELREG(ES, es);
1759 AssertRC(rc);
1760
1761 VMX_WRITE_SELREG(CS, cs);
1762 AssertRC(rc);
1763
1764 VMX_WRITE_SELREG(SS, ss);
1765 AssertRC(rc);
1766
1767 VMX_WRITE_SELREG(DS, ds);
1768 AssertRC(rc);
1769
1770 VMX_WRITE_SELREG(FS, fs);
1771 AssertRC(rc);
1772
1773 VMX_WRITE_SELREG(GS, gs);
1774 AssertRC(rc);
1775 }
1776
1777 /*
1778 * Guest CPU context: LDTR.
1779 */
1780 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
1781 {
1782 if (pCtx->ldtr.Sel == 0)
1783 {
1784 rc = VMXWriteVmcs(VMX_VMCS16_GUEST_FIELD_LDTR, 0);
1785 rc |= VMXWriteVmcs(VMX_VMCS32_GUEST_LDTR_LIMIT, 0);
1786             rc |= VMXWriteVmcs64(VMX_VMCS_GUEST_LDTR_BASE, 0); /** @todo Using the non-"64" variant of the write function should yield the same result. */
1787 /* Note: vmlaunch will fail with 0 or just 0x02. No idea why. */
1788 rc |= VMXWriteVmcs(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, 0x82 /* present, LDT */);
1789 }
1790 else
1791 {
1792 rc = VMXWriteVmcs(VMX_VMCS16_GUEST_FIELD_LDTR, pCtx->ldtr.Sel);
1793 rc |= VMXWriteVmcs(VMX_VMCS32_GUEST_LDTR_LIMIT, pCtx->ldtr.u32Limit);
1794             rc |= VMXWriteVmcs64(VMX_VMCS_GUEST_LDTR_BASE, pCtx->ldtr.u64Base); /** @todo Using the non-"64" variant of the write function should yield the same result. */
1795 rc |= VMXWriteVmcs(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, pCtx->ldtr.Attr.u);
1796 }
1797 AssertRC(rc);
1798 }
1799
1800 /*
1801 * Guest CPU context: TR.
1802 */
1803 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
1804 {
1805 /*
1806 * Real mode emulation using v86 mode with CR4.VME (interrupt redirection
1807 * using the int bitmap in the TSS).
1808 */
1809 if ( CPUMIsGuestInRealModeEx(pCtx)
1810 && pVM->hm.s.vmx.pRealModeTSS)
1811 {
1812 RTGCPHYS GCPhys;
1813
1814 /* We convert it here every time as PCI regions could be reconfigured. */
1815 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
1816 AssertRC(rc);
1817
1818 rc = VMXWriteVmcs(VMX_VMCS16_GUEST_FIELD_TR, 0);
1819 rc |= VMXWriteVmcs(VMX_VMCS32_GUEST_TR_LIMIT, HM_VTX_TSS_SIZE);
1820 rc |= VMXWriteVmcs64(VMX_VMCS_GUEST_TR_BASE, GCPhys /* phys = virt in this mode */);
1821
1822 X86DESCATTR attr;
1823
1824 attr.u = 0;
1825 attr.n.u1Present = 1;
1826 attr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
1827 val = attr.u;
1828 }
1829 else
1830 {
1831 rc = VMXWriteVmcs(VMX_VMCS16_GUEST_FIELD_TR, pCtx->tr.Sel);
1832 rc |= VMXWriteVmcs(VMX_VMCS32_GUEST_TR_LIMIT, pCtx->tr.u32Limit);
1833 rc |= VMXWriteVmcs64(VMX_VMCS_GUEST_TR_BASE, pCtx->tr.u64Base);
1834
1835 val = pCtx->tr.Attr.u;
1836
1837 /* The TSS selector must be busy (REM bugs? see defect #XXXX). */
1838 if (!(val & X86_SEL_TYPE_SYS_TSS_BUSY_MASK))
1839 {
1840 if (val & 0xf)
1841 val |= X86_SEL_TYPE_SYS_TSS_BUSY_MASK;
1842 else
1843 /* Default if no TR selector has been set (otherwise vmlaunch will fail!) */
1844 val = (val & ~0xF) | X86_SEL_TYPE_SYS_386_TSS_BUSY;
1845 }
1846 AssertMsg((val & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY || (val & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY,
1847 ("%#x\n", val));
1848 }
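            /* For reference: in the common case the value written below is 0x8b, i.e. present, DPL 0,
               type 1011b (busy 32-bit TSS), which also satisfies the assertion in the branch above. */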
1849 rc |= VMXWriteVmcs(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, val);
1850 AssertRC(rc);
1851 }
1852
1853 /*
1854 * Guest CPU context: GDTR.
1855 */
1856 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
1857 {
1858 rc = VMXWriteVmcs(VMX_VMCS32_GUEST_GDTR_LIMIT, pCtx->gdtr.cbGdt);
1859 rc |= VMXWriteVmcs64(VMX_VMCS_GUEST_GDTR_BASE, pCtx->gdtr.pGdt);
1860 AssertRC(rc);
1861 }
1862
1863 /*
1864 * Guest CPU context: IDTR.
1865 */
1866 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
1867 {
1868 rc = VMXWriteVmcs(VMX_VMCS32_GUEST_IDTR_LIMIT, pCtx->idtr.cbIdt);
1869 rc |= VMXWriteVmcs64(VMX_VMCS_GUEST_IDTR_BASE, pCtx->idtr.pIdt);
1870 AssertRC(rc);
1871 }
1872
1873 /*
1874 * Sysenter MSRs.
1875 */
1876 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_MSR)
1877 {
1878 rc = VMXWriteVmcs(VMX_VMCS32_GUEST_SYSENTER_CS, pCtx->SysEnter.cs);
1879 rc |= VMXWriteVmcs64(VMX_VMCS_GUEST_SYSENTER_EIP, pCtx->SysEnter.eip);
1880 rc |= VMXWriteVmcs64(VMX_VMCS_GUEST_SYSENTER_ESP, pCtx->SysEnter.esp);
1881 AssertRC(rc);
1882 }
1883
1884 /*
1885 * Guest CPU context: Control registers.
1886 */
1887 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
1888 {
1889 val = pCtx->cr0;
1890 rc = VMXWriteVmcs(VMX_VMCS_CTRL_CR0_READ_SHADOW, val);
1891 Log2(("Guest CR0-shadow %08x\n", val));
1892 if (CPUMIsGuestFPUStateActive(pVCpu) == false)
1893 {
1894 /* Always use #NM exceptions to load the FPU/XMM state on demand. */
1895 val |= X86_CR0_TS | X86_CR0_ET | X86_CR0_NE | X86_CR0_MP;
1896 }
1897 else
1898 {
1899 /** @todo check if we support the old style mess correctly. */
1900 if (!(val & X86_CR0_NE))
1901 Log(("Forcing X86_CR0_NE!!!\n"));
1902
1903 val |= X86_CR0_NE; /* always turn on the native mechanism to report FPU errors (old style uses interrupts) */
1904 }
1905 /* Protected mode & paging are always enabled; we use them for emulating real and protected mode without paging too. */
1906 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
1907 val |= X86_CR0_PE | X86_CR0_PG;
1908
1909 if (pVM->hm.s.fNestedPaging)
1910 {
1911 if (CPUMIsGuestInPagedProtectedModeEx(pCtx))
1912 {
1913 /* Disable CR3 read/write monitoring as we don't need it for EPT. */
1914 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
1915 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT);
1916 }
1917 else
1918 {
1919                 /* Re-enable CR3 read/write monitoring as our identity-mapped page table is active. */
1920 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
1921 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
1922 }
1923 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
1924 AssertRC(rc);
1925 }
1926 else
1927 {
1928 /* Note: We must also set this as we rely on protecting various pages for which supervisor writes must be caught. */
1929 val |= X86_CR0_WP;
1930 }
1931
1932 /* Always enable caching. */
1933 val &= ~(X86_CR0_CD|X86_CR0_NW);
1934
1935 rc |= VMXWriteVmcs64(VMX_VMCS_GUEST_CR0, val);
1936 Log2(("Guest CR0 %08x\n", val));
1937
1938 /*
1939      * CR0 flags owned by the host; if the guest attempts to change them, the VM will exit.
1940 */
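            /* Rough illustration (exact values depend on the X86_CR0_* definitions): PE|WP|PG|CD|NW|NE
               comes to about 0xe0010021, growing to about 0xe001003b once TS|ET|MP are added while the
               guest FPU state is inactive. Guest attempts to change any masked bit cause a VM-exit;
               the remaining bits pass straight through to the real CR0. */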
1941 val = X86_CR0_PE /* Must monitor this bit (assumptions are made for real mode emulation) */
1942 | X86_CR0_WP /* Must monitor this bit (it must always be enabled). */
1943 | X86_CR0_PG /* Must monitor this bit (assumptions are made for real mode & protected mode without paging emulation) */
1944 | X86_CR0_CD /* Bit not restored during VM-exit! */
1945 | X86_CR0_NW /* Bit not restored during VM-exit! */
1946 | X86_CR0_NE;
1947
1948 /*
1949          * When the guest's FPU state is active, we no longer care about the FPU-related bits.
1950 */
1951 if (CPUMIsGuestFPUStateActive(pVCpu) == false)
1952 val |= X86_CR0_TS | X86_CR0_ET | X86_CR0_MP;
1953
1954 pVCpu->hm.s.vmx.cr0_mask = val;
1955
1956 rc |= VMXWriteVmcs(VMX_VMCS_CTRL_CR0_MASK, val);
1957 Log2(("Guest CR0-mask %08x\n", val));
1958 AssertRC(rc);
1959 }
1960
1961 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
1962 {
1963 rc = VMXWriteVmcs(VMX_VMCS_CTRL_CR4_READ_SHADOW, pCtx->cr4);
1964 Log2(("Guest CR4-shadow %08x\n", pCtx->cr4));
1965 /* Set the required bits in cr4 too (currently X86_CR4_VMXE). */
1966 val = pCtx->cr4 | (uint32_t)pVM->hm.s.vmx.msr.vmx_cr4_fixed0;
1967
1968 if (!pVM->hm.s.fNestedPaging)
1969 {
1970 switch (pVCpu->hm.s.enmShadowMode)
1971 {
1972 case PGMMODE_REAL: /* Real mode -> emulated using v86 mode */
1973 case PGMMODE_PROTECTED: /* Protected mode, no paging -> emulated using identity mapping. */
1974 case PGMMODE_32_BIT: /* 32-bit paging. */
1975 val &= ~X86_CR4_PAE;
1976 break;
1977
1978 case PGMMODE_PAE: /* PAE paging. */
1979 case PGMMODE_PAE_NX: /* PAE paging with NX enabled. */
1980                 /* Must use PAE paging as we could be using physical memory > 4 GB. */
1981 val |= X86_CR4_PAE;
1982 break;
1983
1984 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
1985 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
1986#ifdef VBOX_ENABLE_64_BITS_GUESTS
1987 break;
1988#else
1989 AssertFailed();
1990 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1991#endif
1992 default: /* shut up gcc */
1993 AssertFailed();
1994 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
1995 }
1996 }
1997 else if ( !CPUMIsGuestInPagedProtectedModeEx(pCtx)
1998 && !pVM->hm.s.vmx.fUnrestrictedGuest)
1999 {
2000 /* We use 4 MB pages in our identity mapping page table for real and protected mode without paging. */
2001 val |= X86_CR4_PSE;
2002             /* Our identity mapping is a 32-bit page directory. */
2003 val &= ~X86_CR4_PAE;
2004 }
2005
2006 /*
2007 * Turn off VME if we're in emulated real mode.
2008 */
2009 if ( CPUMIsGuestInRealModeEx(pCtx)
2010 && pVM->hm.s.vmx.pRealModeTSS)
2011 {
2012 val &= ~X86_CR4_VME;
2013 }
2014
2015 rc |= VMXWriteVmcs64(VMX_VMCS_GUEST_CR4, val);
2016 Log2(("Guest CR4 %08x\n", val));
2017
2018 /*
2019      * CR4 flags owned by the host; if the guest attempts to change them, the VM will exit.
2020 */
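            /* Rough illustration (exact value depends on the X86_CR4_* definitions): VME|PAE|PGE|PSE|VMXE
               works out to about 0x20b1, so e.g. a guest toggling CR4.PGE traps here while bits such as
               CR4.OSFXSR remain pass-through. */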
2021 val = 0
2022 | X86_CR4_VME
2023 | X86_CR4_PAE
2024 | X86_CR4_PGE
2025 | X86_CR4_PSE
2026 | X86_CR4_VMXE;
2027 pVCpu->hm.s.vmx.cr4_mask = val;
2028
2029 rc |= VMXWriteVmcs(VMX_VMCS_CTRL_CR4_MASK, val);
2030 Log2(("Guest CR4-mask %08x\n", val));
2031 AssertRC(rc);
2032 }
2033
2034#if 0
2035 /* Enable single stepping if requested and CPU supports it. */
2036 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG)
2037 if (DBGFIsStepping(pVCpu))
2038 {
2039 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
2040 rc = VMXWriteVmcs(VMX_VMCS_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
2041 AssertRC(rc);
2042 }
2043#endif
2044
2045 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
2046 {
2047 if (pVM->hm.s.fNestedPaging)
2048 {
2049 Assert(PGMGetHyperCR3(pVCpu));
2050 pVCpu->hm.s.vmx.GCPhysEPTP = PGMGetHyperCR3(pVCpu);
2051
2052 Assert(!(pVCpu->hm.s.vmx.GCPhysEPTP & 0xfff));
2053 /** @todo Check the IA32_VMX_EPT_VPID_CAP MSR for other supported memory types. */
2054 pVCpu->hm.s.vmx.GCPhysEPTP |= VMX_EPT_MEMTYPE_WB
2055 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
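                /* For illustration, assuming the usual EPTP encoding (bits 2:0 = memory type, 6 = WB;
                   bits 5:3 = page-walk length minus 1, i.e. 3 for a 4-level walk): the bits OR'ed in
                   above amount to 0x1e, so a PML4 at 0x12345000 yields an EPTP of 0x1234501e. */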
2056
2057 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.GCPhysEPTP);
2058 AssertRC(rc);
2059
2060 if ( !CPUMIsGuestInPagedProtectedModeEx(pCtx)
2061 && !pVM->hm.s.vmx.fUnrestrictedGuest)
2062 {
2063 RTGCPHYS GCPhys;
2064
2065 /* We convert it here every time as PCI regions could be reconfigured. */
2066 rc = PDMVMMDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
2067 AssertMsgRC(rc, ("pNonPagingModeEPTPageTable = %RGv\n", pVM->hm.s.vmx.pNonPagingModeEPTPageTable));
2068
2069 /*
2070 * We use our identity mapping page table here as we need to map guest virtual to
2071 * guest physical addresses; EPT will take care of the translation to host physical addresses.
2072 */
2073 val = GCPhys;
2074 }
2075 else
2076 {
2077 /* Save the real guest CR3 in VMX_VMCS_GUEST_CR3 */
2078 val = pCtx->cr3;
2079 rc = hmR0VmxLoadPaePdpes(pVCpu, pCtx);
2080 AssertRCReturn(rc, rc);
2081 }
2082 }
2083 else
2084 {
2085 val = PGMGetHyperCR3(pVCpu);
2086 Assert(val || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL));
2087 }
2088
2089 /* Save our shadow CR3 register. */
2090 rc = VMXWriteVmcs64(VMX_VMCS_GUEST_CR3, val);
2091 AssertRC(rc);
2092 }
2093
2094 /*
2095 * Guest CPU context: Debug registers.
2096 */
2097 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
2098 {
2099 pCtx->dr[6] |= X86_DR6_INIT_VAL; /* set all reserved bits to 1. */
2100 pCtx->dr[6] &= ~RT_BIT(12); /* must be zero. */
2101
2102 pCtx->dr[7] &= 0xffffffff; /* upper 32 bits reserved */
2103 pCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* must be zero */
2104 pCtx->dr[7] |= 0x400; /* must be one */
2105
2106 /* Resync DR7 */
2107 rc = VMXWriteVmcs64(VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
2108 AssertRC(rc);
2109
2110#ifdef DEBUG
2111 /* Sync the hypervisor debug state now if any breakpoint is armed. */
2112 if ( CPUMGetHyperDR7(pVCpu) & (X86_DR7_ENABLED_MASK|X86_DR7_GD)
2113 && !CPUMIsHyperDebugStateActive(pVCpu)
2114 && !DBGFIsStepping(pVCpu))
2115 {
2116 /* Save the host and load the hypervisor debug state. */
2117 rc = CPUMR0LoadHyperDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
2118 AssertRC(rc);
2119
2120 /* DRx intercepts remain enabled. */
2121
2122 /* Override dr7 with the hypervisor value. */
2123 rc = VMXWriteVmcs64(VMX_VMCS_GUEST_DR7, CPUMGetHyperDR7(pVCpu));
2124 AssertRC(rc);
2125 }
2126 else
2127#endif
2128 /* Sync the debug state now if any breakpoint is armed. */
2129 if ( (pCtx->dr[7] & (X86_DR7_ENABLED_MASK|X86_DR7_GD))
2130 && !CPUMIsGuestDebugStateActive(pVCpu)
2131 && !DBGFIsStepping(pVCpu))
2132 {
2133 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
2134
2135 /* Disable DRx move intercepts. */
2136 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
2137 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
2138 AssertRC(rc);
2139
2140 /* Save the host and load the guest debug state. */
2141 rc = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
2142 AssertRC(rc);
2143 }
2144
2145 /* IA32_DEBUGCTL MSR. */
2146 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0);
2147 AssertRC(rc);
2148
2149 /** @todo do we really ever need this? */
2150 rc |= VMXWriteVmcs(VMX_VMCS_GUEST_DEBUG_EXCEPTIONS, 0);
2151 AssertRC(rc);
2152 }
2153
2154 /*
2155 * 64-bit guest mode.
2156 */
2157 if (CPUMIsGuestInLongModeEx(pCtx))
2158 {
2159#if !defined(VBOX_ENABLE_64_BITS_GUESTS)
2160 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2161#elif HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2162 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
2163#else
2164# ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2165 if (!pVM->hm.s.fAllow64BitGuests)
2166 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
2167# endif
2168 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
2169#endif
2170 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_MSR)
2171 {
2172 /* Update these as wrmsr might have changed them. */
2173 rc = VMXWriteVmcs64(VMX_VMCS_GUEST_FS_BASE, pCtx->fs.u64Base);
2174 AssertRC(rc);
2175 rc = VMXWriteVmcs64(VMX_VMCS_GUEST_GS_BASE, pCtx->gs.u64Base);
2176 AssertRC(rc);
2177 }
2178 }
2179 else
2180 {
2181 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
2182 }
2183
2184 hmR0VmxUpdateExceptionBitmap(pVM, pVCpu, pCtx);
2185
2186#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
2187 /*
2188     * Store all guest MSRs in the VM-entry load area, so they will be loaded
2189     * during VM-entry and stored back into the VM-exit store area during VM-exit.
2190 */
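        /* Each slot written below follows the VMXMSR layout used here: a 32-bit MSR index, 32 reserved
           bits and a 64-bit value (presumably 16 bytes per entry); idxMsr counts the slots and is
           handed to the VMCS as both the entry-load and exit-store count further down. */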
2191 PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
2192 unsigned idxMsr = 0;
2193
2194 uint32_t u32GstExtFeatures;
2195 uint32_t u32Temp;
2196 CPUMGetGuestCpuId(pVCpu, 0x80000001, &u32Temp, &u32Temp, &u32Temp, &u32GstExtFeatures);
2197
2198 if (u32GstExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
2199 {
2200 pMsr->u32IndexMSR = MSR_K6_EFER;
2201 pMsr->u32Reserved = 0;
2202 pMsr->u64Value = pCtx->msrEFER;
2203 /* VT-x will complain if only MSR_K6_EFER_LME is set. */
2204 if (!CPUMIsGuestInLongModeEx(pCtx))
2205 pMsr->u64Value &= ~(MSR_K6_EFER_LMA | MSR_K6_EFER_LME);
2206 pMsr++; idxMsr++;
2207
2208 if (u32GstExtFeatures & X86_CPUID_EXT_FEATURE_EDX_LONG_MODE)
2209 {
2210 pMsr->u32IndexMSR = MSR_K8_LSTAR;
2211 pMsr->u32Reserved = 0;
2212 pMsr->u64Value = pCtx->msrLSTAR; /* 64 bits mode syscall rip */
2213 pMsr++; idxMsr++;
2214 pMsr->u32IndexMSR = MSR_K6_STAR;
2215 pMsr->u32Reserved = 0;
2216 pMsr->u64Value = pCtx->msrSTAR; /* legacy syscall eip, cs & ss */
2217 pMsr++; idxMsr++;
2218 pMsr->u32IndexMSR = MSR_K8_SF_MASK;
2219 pMsr->u32Reserved = 0;
2220 pMsr->u64Value = pCtx->msrSFMASK; /* syscall flag mask */
2221 pMsr++; idxMsr++;
2222
2223 /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
2224#if 0
2225 pMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
2226 pMsr->u32Reserved = 0;
2227 pMsr->u64Value = pCtx->msrKERNELGSBASE; /* swapgs exchange value */
2228 pMsr++; idxMsr++;
2229#endif
2230 }
2231 }
2232
2233 if ( pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP
2234 && (u32GstExtFeatures & X86_CPUID_EXT_FEATURE_EDX_RDTSCP))
2235 {
2236 pMsr->u32IndexMSR = MSR_K8_TSC_AUX;
2237 pMsr->u32Reserved = 0;
2238 rc = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pMsr->u64Value);
2239 AssertRC(rc);
2240 pMsr++; idxMsr++;
2241 }
2242
2243 pVCpu->hm.s.vmx.cGuestMsrs = idxMsr;
2244
2245 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, idxMsr);
2246 AssertRC(rc);
2247
2248 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, idxMsr);
2249 AssertRC(rc);
2250#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
2251
2252 bool fOffsettedTsc;
2253 if (pVM->hm.s.vmx.fUsePreemptTimer)
2254 {
2255 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset);
2256
2257 /* Make sure the returned values have sane upper and lower boundaries. */
2258 uint64_t u64CpuHz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
2259
2260 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64 of a second */
2261 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
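            /* Worked example with illustrative numbers only: for u64CpuHz = 2 GHz the clamp keeps the
               deadline between ~976k ticks (1/2048 s) and ~31.25M ticks (1/64 s); with a preemption
               timer shift of 5 the value programmed below would land roughly in the 30k..977k range. */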
2262
2263 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
2264 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
2265 rc = VMXWriteVmcs(VMX_VMCS32_GUEST_PREEMPTION_TIMER_VALUE, cPreemptionTickCount);
2266 AssertRC(rc);
2267 }
2268 else
2269 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset);
2270
2271 if (fOffsettedTsc)
2272 {
2273 uint64_t u64CurTSC = ASMReadTSC();
2274 if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
2275 {
2276 /* Note: VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
2277 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset);
2278 AssertRC(rc);
2279
2280 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
2281 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
2282 AssertRC(rc);
2283 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
2284 }
2285 else
2286 {
2287 /* Fall back to rdtsc, rdtscp emulation as we would otherwise pass decreasing tsc values to the guest. */
2288 LogFlow(("TSC %RX64 offset %RX64 time=%RX64 last=%RX64 (diff=%RX64, virt_tsc=%RX64)\n", u64CurTSC,
2289 pVCpu->hm.s.vmx.u64TSCOffset, u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset,
2290 TMCpuTickGetLastSeen(pVCpu), TMCpuTickGetLastSeen(pVCpu) - u64CurTSC - pVCpu->hm.s.vmx.u64TSCOffset,
2291 TMCpuTickGet(pVCpu)));
2292 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
2293 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
2294 AssertRC(rc);
2295 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
2296 }
2297 }
2298 else
2299 {
2300 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT;
2301 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
2302 AssertRC(rc);
2303 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
2304 }
2305
2306 /* Done with the major changes */
2307 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_ALL_GUEST;
2308
2309 /* Minimal guest state update (ESP, EIP, EFLAGS mostly) */
2310 VMXR0LoadMinimalGuestState(pVM, pVCpu, pCtx);
2311 return rc;
2312}
2313
2314
2315/**
2316 * Syncs back the guest state from VMCS.
2317 *
2318 * @returns VBox status code.
2319 * @param pVM Pointer to the VM.
2320 * @param pVCpu Pointer to the VMCPU.
2321 * @param pCtx Pointer to the guest CPU context.
2322 */
2323DECLINLINE(int) VMXR0SaveGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2324{
2325 RTGCUINTREG val, valShadow;
2326 RTGCUINTPTR uInterruptState;
2327 int rc;
2328
2329 /* First sync back EIP, ESP, and EFLAGS. */
2330 rc = VMXReadCachedVmcs(VMX_VMCS_GUEST_RIP, &val);
2331 AssertRC(rc);
2332 pCtx->rip = val;
2333 rc = VMXReadCachedVmcs(VMX_VMCS_GUEST_RSP, &val);
2334 AssertRC(rc);
2335 pCtx->rsp = val;
2336 rc = VMXReadCachedVmcs(VMX_VMCS_GUEST_RFLAGS, &val);
2337 AssertRC(rc);
2338 pCtx->eflags.u32 = val;
2339
2340 /* Take care of instruction fusing (sti, mov ss) */
2341 rc |= VMXReadCachedVmcs(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &val);
2342 uInterruptState = val;
2343 if (uInterruptState != 0)
2344 {
2345 Assert(uInterruptState <= 2); /* only sti & mov ss */
2346 Log(("uInterruptState %x eip=%RGv\n", (uint32_t)uInterruptState, pCtx->rip));
2347 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip);
2348 }
2349 else
2350 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
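        /* Note on the encoding: the interruptibility state is a bit field (bit 0 = blocking by STI,
           bit 1 = blocking by MOV SS/POP SS); the assert above assumes at most one of the two is set,
           which appears to hold in practice. */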
2351
2352 /* Control registers. */
2353 VMXReadCachedVmcs(VMX_VMCS_CTRL_CR0_READ_SHADOW, &valShadow);
2354 VMXReadCachedVmcs(VMX_VMCS_GUEST_CR0, &val);
2355 val = (valShadow & pVCpu->hm.s.vmx.cr0_mask) | (val & ~pVCpu->hm.s.vmx.cr0_mask);
2356 CPUMSetGuestCR0(pVCpu, val);
2357
2358 VMXReadCachedVmcs(VMX_VMCS_CTRL_CR4_READ_SHADOW, &valShadow);
2359 VMXReadCachedVmcs(VMX_VMCS_GUEST_CR4, &val);
2360 val = (valShadow & pVCpu->hm.s.vmx.cr4_mask) | (val & ~pVCpu->hm.s.vmx.cr4_mask);
2361 CPUMSetGuestCR4(pVCpu, val);
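        /* The combining rule above: bits covered by the guest/host mask are taken from the read shadow
           (what the guest believes it wrote), the rest from the real VMCS value. E.g. since CR0.CD is
           masked and kept clear in the real CR0, a guest that set CD still sees it set via the shadow. */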
2362
2363 /*
2364     * No reason to sync back the CRx registers. They can't be changed by the guest, except in
2365     * the nested paging case where CR3 & CR4 can be.
2366 */
2367 if ( pVM->hm.s.fNestedPaging
2368 && CPUMIsGuestInPagedProtectedModeEx(pCtx)) /** @todo check if we will always catch mode switches and such... */
2369 {
2370 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
2371
2372 /* Can be updated behind our back in the nested paging case. */
2373 CPUMSetGuestCR2(pVCpu, pCache->cr2);
2374
2375 VMXReadCachedVmcs(VMX_VMCS_GUEST_CR3, &val);
2376
2377 if (val != pCtx->cr3)
2378 {
2379 CPUMSetGuestCR3(pVCpu, val);
2380 PGMUpdateCR3(pVCpu, val);
2381 }
2382 rc = hmR0VmxSavePaePdpes(pVCpu, pCtx);
2383 AssertRCReturn(rc, rc);
2384 }
2385
2386 /* Sync back DR7. */
2387 VMXReadCachedVmcs(VMX_VMCS_GUEST_DR7, &val);
2388 pCtx->dr[7] = val;
2389
2390 /* Guest CPU context: ES, CS, SS, DS, FS, GS. */
2391 VMX_READ_SELREG(ES, es);
2392 VMX_READ_SELREG(SS, ss);
2393 VMX_READ_SELREG(CS, cs);
2394 VMX_READ_SELREG(DS, ds);
2395 VMX_READ_SELREG(FS, fs);
2396 VMX_READ_SELREG(GS, gs);
2397
2398 /* System MSRs */
2399 VMXReadCachedVmcs(VMX_VMCS32_GUEST_SYSENTER_CS, &val);
2400 pCtx->SysEnter.cs = val;
2401 VMXReadCachedVmcs(VMX_VMCS_GUEST_SYSENTER_EIP, &val);
2402 pCtx->SysEnter.eip = val;
2403 VMXReadCachedVmcs(VMX_VMCS_GUEST_SYSENTER_ESP, &val);
2404 pCtx->SysEnter.esp = val;
2405
2406 /* Misc. registers; must sync everything otherwise we can get out of sync when jumping to ring 3. */
2407 VMX_READ_SELREG(LDTR, ldtr);
2408
2409 VMXReadCachedVmcs(VMX_VMCS32_GUEST_GDTR_LIMIT, &val);
2410 pCtx->gdtr.cbGdt = val;
2411 VMXReadCachedVmcs(VMX_VMCS_GUEST_GDTR_BASE, &val);
2412 pCtx->gdtr.pGdt = val;
2413
2414 VMXReadCachedVmcs(VMX_VMCS32_GUEST_IDTR_LIMIT, &val);
2415 pCtx->idtr.cbIdt = val;
2416 VMXReadCachedVmcs(VMX_VMCS_GUEST_IDTR_BASE, &val);
2417 pCtx->idtr.pIdt = val;
2418
2419 /* Real mode emulation using v86 mode. */
2420 if ( CPUMIsGuestInRealModeEx(pCtx)
2421 && pVM->hm.s.vmx.pRealModeTSS)
2422 {
2423 /* Hide our emulation flags */
2424 pCtx->eflags.Bits.u1VM = 0;
2425
2426 /* Restore original IOPL setting as we always use 0. */
2427 pCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.eflags.Bits.u2IOPL;
2428
2429 /* Force a TR resync every time in case we switch modes. */
2430 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_TR;
2431 }
2432 else
2433 {
2434 /* In real mode we have a fake TSS, so only sync it back when it's supposed to be valid. */
2435 VMX_READ_SELREG(TR, tr);
2436 }
2437
2438#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
2439 /*
2440 * Save the possibly changed MSRs that we automatically restore and save during a world switch.
2441 */
2442 for (unsigned i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
2443 {
2444 PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
2445 pMsr += i;
2446
2447 switch (pMsr->u32IndexMSR)
2448 {
2449 case MSR_K8_LSTAR:
2450 pCtx->msrLSTAR = pMsr->u64Value;
2451 break;
2452 case MSR_K6_STAR:
2453 pCtx->msrSTAR = pMsr->u64Value;
2454 break;
2455 case MSR_K8_SF_MASK:
2456 pCtx->msrSFMASK = pMsr->u64Value;
2457 break;
2458 /* The KERNEL_GS_BASE MSR doesn't work reliably with auto load/store. See @bugref{6208} */
2459#if 0
2460 case MSR_K8_KERNEL_GS_BASE:
2461 pCtx->msrKERNELGSBASE = pMsr->u64Value;
2462 break;
2463#endif
2464 case MSR_K8_TSC_AUX:
2465 CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value);
2466 break;
2467
2468 case MSR_K6_EFER:
2469 /* EFER can't be changed without causing a VM-exit. */
2470 /* Assert(pCtx->msrEFER == pMsr->u64Value); */
2471 break;
2472
2473 default:
2474 AssertFailed();
2475 return VERR_HM_UNEXPECTED_LD_ST_MSR;
2476 }
2477 }
2478#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
2479 return VINF_SUCCESS;
2480}
2481
2482
2483/**
2484 * Dummy placeholder for TLB flush handling before VM-entry. Used in the case
2485 * where neither EPT nor VPID is supported by the CPU.
2486 *
2487 * @param pVM Pointer to the VM.
2488 * @param pVCpu Pointer to the VMCPU.
2489 */
2490static DECLCALLBACK(void) hmR0VmxSetupTLBDummy(PVM pVM, PVMCPU pVCpu)
2491{
2492 NOREF(pVM);
2493 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
2494 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2495 pVCpu->hm.s.TlbShootdown.cPages = 0;
2496 return;
2497}
2498
2499
2500/**
2501 * Setup the tagged TLB for EPT+VPID.
2502 *
2503 * @param pVM Pointer to the VM.
2504 * @param pVCpu Pointer to the VMCPU.
2505 */
2506static DECLCALLBACK(void) hmR0VmxSetupTLBBoth(PVM pVM, PVMCPU pVCpu)
2507{
2508 PHMGLOBLCPUINFO pCpu;
2509
2510 Assert(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid);
2511
2512 pCpu = HMR0GetCurrentCpu();
2513
2514 /*
2515     * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
2516     * This can happen both for start & resume due to long jumps back to ring-3.
2517     * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB,
2518     * or the host CPU has come back online after a suspend/resume, so we cannot reuse the current ASID anymore.
2519 */
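        /* Illustrative scenario: if this VCPU last ran on host CPU 2 and is now scheduled on CPU 3, or
           if another VCPU wrapped this CPU's ASID counter past uMaxAsid (bumping cTlbFlushes), we force
           a flush and take a fresh ASID below. */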
2520 bool fNewAsid = false;
2521 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2522 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2523 {
2524 pVCpu->hm.s.fForceTLBFlush = true;
2525 fNewAsid = true;
2526 }
2527
2528 /*
2529 * Check for explicit TLB shootdowns.
2530 */
2531 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2532 pVCpu->hm.s.fForceTLBFlush = true;
2533
2534 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2535
2536 if (pVCpu->hm.s.fForceTLBFlush)
2537 {
2538 if (fNewAsid)
2539 {
2540 ++pCpu->uCurrentAsid;
2541 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
2542 {
2543 pCpu->uCurrentAsid = 1; /* start at 1; host uses 0 */
2544 pCpu->cTlbFlushes++;
2545 pCpu->fFlushAsidBeforeUse = true;
2546 }
2547
2548 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
2549 if (pCpu->fFlushAsidBeforeUse)
2550 hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
2551 }
2552 else
2553 {
2554 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
2555 hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_SINGLE_CONTEXT, 0 /* GCPtr */);
2556 else
2557 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2558 }
2559
2560 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2561 pVCpu->hm.s.fForceTLBFlush = false;
2562 }
2563 else
2564 {
2565 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
2566 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
2567 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
2568 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
2569
2570 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
2571 * not be executed. See hmQueueInvlPage() where it is commented
2572 * out. Support individual entry flushing someday. */
2573 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2574 {
2575 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
2576
2577 /*
2578             * Flush individual guest entries from the TLB using VPID, or flush as little as possible
2579             * with EPT, as supported by the CPU.
2580 */
2581 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2582 {
2583 for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
2584 hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
2585 }
2586 else
2587 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2588 }
2589 else
2590 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
2591 }
2592
2593 pVCpu->hm.s.TlbShootdown.cPages = 0;
2594 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2595
2596 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2597 ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2598 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2599 ("cpu%d uCurrentAsid = %x\n", pCpu->idCpu, pCpu->uCurrentAsid));
2600 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2601 ("cpu%d VM uCurrentAsid = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2602
2603 /* Update VMCS with the VPID. */
2604 int rc = VMXWriteVmcs(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
2605 AssertRC(rc);
2606}
2607
2608
2609/**
2610 * Setup the tagged TLB for EPT only.
2611 *
2612 * @returns VBox status code.
2613 * @param pVM Pointer to the VM.
2614 * @param pVCpu Pointer to the VMCPU.
2615 */
2616static DECLCALLBACK(void) hmR0VmxSetupTLBEPT(PVM pVM, PVMCPU pVCpu)
2617{
2618 PHMGLOBLCPUINFO pCpu;
2619
2620 Assert(pVM->hm.s.fNestedPaging);
2621 Assert(!pVM->hm.s.vmx.fVpid);
2622
2623 pCpu = HMR0GetCurrentCpu();
2624
2625 /*
2626     * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
2627     * This can happen both for start & resume due to long jumps back to ring-3.
2628     * A change in the TLB flush count implies the host CPU has come back online after a suspend/resume.
2629 */
2630 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2631 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2632 {
2633 pVCpu->hm.s.fForceTLBFlush = true;
2634 }
2635
2636 /*
2637 * Check for explicit TLB shootdown flushes.
2638 */
2639 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2640 pVCpu->hm.s.fForceTLBFlush = true;
2641
2642 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2643 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2644
2645 if (pVCpu->hm.s.fForceTLBFlush)
2646 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2647 else
2648 {
2649 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
2650 * not be executed. See hmQueueInvlPage() where it is commented
2651 * out. Support individual entry flushing someday. */
2652 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2653 {
2654 /*
2655 * We cannot flush individual entries without VPID support. Flush using EPT.
2656 */
2657 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
2658 hmR0VmxFlushEPT(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
2659 }
2660 }
2661     pVCpu->hm.s.TlbShootdown.cPages = 0;
2662 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2663
2664#ifdef VBOX_WITH_STATISTICS
2665 /** @todo r=ramshankar: this is not accurate anymore with the VPID+EPT
2666 * handling. Should be fixed later. */
2667 if (pVCpu->hm.s.fForceTLBFlush)
2668 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2669 else
2670 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
2671#endif
2672}
2673
2674
2675/**
2676 * Setup the tagged TLB for VPID.
2677 *
2678 * @returns VBox status code.
2679 * @param pVM Pointer to the VM.
2680 * @param pVCpu Pointer to the VMCPU.
2681 */
2682static DECLCALLBACK(void) hmR0VmxSetupTLBVPID(PVM pVM, PVMCPU pVCpu)
2683{
2684 PHMGLOBLCPUINFO pCpu;
2685
2686 Assert(pVM->hm.s.vmx.fVpid);
2687 Assert(!pVM->hm.s.fNestedPaging);
2688
2689 pCpu = HMR0GetCurrentCpu();
2690
2691 /*
2692     * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
2693     * This can happen both for start & resume due to long jumps back to ring-3.
2694     * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB,
2695     * or the host CPU has come back online after a suspend/resume, so we cannot reuse the current ASID anymore.
2696 */
2697 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
2698 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
2699 {
2700 /* Force a TLB flush on VM entry. */
2701 pVCpu->hm.s.fForceTLBFlush = true;
2702 }
2703
2704 /*
2705 * Check for explicit TLB shootdown flushes.
2706 */
2707 if (VMCPU_FF_TESTANDCLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
2708 pVCpu->hm.s.fForceTLBFlush = true;
2709
2710 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
2711
2712 if (pVCpu->hm.s.fForceTLBFlush)
2713 {
2714 ++pCpu->uCurrentAsid;
2715 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
2716 {
2717 pCpu->uCurrentAsid = 1; /* start at 1; host uses 0 */
2718 pCpu->cTlbFlushes++;
2719 pCpu->fFlushAsidBeforeUse = true;
2720 }
2721
2722 pVCpu->hm.s.fForceTLBFlush = false;
2723 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
2724 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
2725 if (pCpu->fFlushAsidBeforeUse)
2726 hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
2727 }
2728 else
2729 {
2730 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
2731 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
2732 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
2733 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
2734
2735 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
2736 * not be executed. See hmQueueInvlPage() where it is commented
2737 * out. Support individual entry flushing someday. */
2738 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
2739 {
2740 /*
2741             * Flush individual guest entries from the TLB using VPID, or flush as little as possible
2742             * with EPT, as supported by the CPU.
2743 */
2744 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
2745 {
2746 for (unsigned i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
2747 hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
2748 }
2749 else
2750 hmR0VmxFlushVPID(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
2751 }
2752 }
2753 pVCpu->hm.s.TlbShootdown.cPages = 0;
2754 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
2755
2756 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
2757 ("Flush count mismatch for cpu %d (%x vs %x)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
2758 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
2759 ("cpu%d uCurrentAsid = %x\n", pCpu->idCpu, pCpu->uCurrentAsid));
2760 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
2761 ("cpu%d VM uCurrentAsid = %x\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
2762
2763 int rc = VMXWriteVmcs(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
2764 AssertRC(rc);
2765
2766# ifdef VBOX_WITH_STATISTICS
2767 /** @todo r=ramshankar: this is not accurate anymore with EPT+VPID handling.
2768 * Should be fixed later. */
2769 if (pVCpu->hm.s.fForceTLBFlush)
2770 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
2771 else
2772 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
2773# endif
2774}
2775
2776
2777/**
2778 * Runs guest code in a VT-x VM.
2779 *
2780 * @returns VBox status code.
2781 * @param pVM Pointer to the VM.
2782 * @param pVCpu Pointer to the VMCPU.
2783 * @param pCtx Pointer to the guest CPU context.
2784 */
2785VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
2786{
2787 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
2788 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
2789 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
2790
2791 VBOXSTRICTRC rc = VINF_SUCCESS;
2792 int rc2;
2793 RTGCUINTREG val;
2794 RTGCUINTREG exitReason = (RTGCUINTREG)VMX_EXIT_INVALID;
2795 RTGCUINTREG instrError, cbInstr;
2796 RTGCUINTPTR exitQualification = 0;
2797 RTGCUINTPTR intInfo = 0; /* shut up buggy gcc 4 */
2798 RTGCUINTPTR errCode, instrInfo;
2799 bool fSetupTPRCaching = false;
2800 uint64_t u64OldLSTAR = 0;
2801 uint8_t u8LastTPR = 0;
2802 RTCCUINTREG uOldEFlags = ~(RTCCUINTREG)0;
2803 unsigned cResume = 0;
2804#ifdef VBOX_STRICT
2805 RTCPUID idCpuCheck;
2806 bool fWasInLongMode = false;
2807#endif
2808#ifdef VBOX_HIGH_RES_TIMERS_HACK_IN_RING0
2809 uint64_t u64LastTime = RTTimeMilliTS();
2810#endif
2811
2812 Assert(!(pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
2813 || (pVCpu->hm.s.vmx.pbVirtApic && pVM->hm.s.vmx.pbApicAccess));
2814
2815 /*
2816 * Check if we need to use TPR shadowing.
2817 */
2818 if ( CPUMIsGuestInLongModeEx(pCtx)
2819 || ( (( pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
2820 || pVM->hm.s.fTRPPatchingAllowed)
2821 && pVM->hm.s.fHasIoApic)
2822 )
2823 {
2824 fSetupTPRCaching = true;
2825 }
2826
2827 Log2(("\nE"));
2828
2829 /* This is not ideal, but if we don't clear the event injection in the VMCS right here,
2830 * we may end up injecting some stale event into a VM, including injecting an event that
2831 * originated before a VM reset *after* the VM has been reset. See @bugref{6220}.
2832 */
2833 VMXWriteVmcs(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, 0);
2834
2835#ifdef VBOX_STRICT
2836 {
2837 RTCCUINTREG val2;
2838
2839 rc2 = VMXReadVmcs(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, &val2);
2840 AssertRC(rc2);
2841 Log2(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS = %08x\n", val2));
2842
2843 /* allowed zero */
2844 if ((val2 & pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0)
2845 Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: zero\n"));
2846
2847 /* allowed one */
2848 if ((val2 & ~pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1) != 0)
2849 Log(("Invalid VMX_VMCS_CTRL_PIN_EXEC_CONTROLS: one\n"));
2850
2851 rc2 = VMXReadVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, &val2);
2852 AssertRC(rc2);
2853 Log2(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS = %08x\n", val2));
2854
2855 /*
2856 * Must be set according to the MSR, but can be cleared if nested paging is used.
2857 */
2858 if (pVM->hm.s.fNestedPaging)
2859 {
2860 val2 |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INVLPG_EXIT
2861 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_LOAD_EXIT
2862 | VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_CR3_STORE_EXIT;
2863 }
2864
2865 /* allowed zero */
2866 if ((val2 & pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0)
2867 Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: zero\n"));
2868
2869 /* allowed one */
2870 if ((val2 & ~pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1) != 0)
2871 Log(("Invalid VMX_VMCS_CTRL_PROC_EXEC_CONTROLS: one\n"));
2872
2873 rc2 = VMXReadVmcs(VMX_VMCS32_CTRL_ENTRY_CONTROLS, &val2);
2874 AssertRC(rc2);
2875 Log2(("VMX_VMCS_CTRL_ENTRY_CONTROLS = %08x\n", val2));
2876
2877 /* allowed zero */
2878 if ((val2 & pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0)
2879 Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: zero\n"));
2880
2881 /* allowed one */
2882 if ((val2 & ~pVM->hm.s.vmx.msr.vmx_entry.n.allowed1) != 0)
2883 Log(("Invalid VMX_VMCS_CTRL_ENTRY_CONTROLS: one\n"));
2884
2885 rc2 = VMXReadVmcs(VMX_VMCS32_CTRL_EXIT_CONTROLS, &val2);
2886 AssertRC(rc2);
2887 Log2(("VMX_VMCS_CTRL_EXIT_CONTROLS = %08x\n", val2));
2888
2889 /* allowed zero */
2890 if ((val2 & pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0) != pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0)
2891 Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: zero\n"));
2892
2893 /* allowed one */
2894 if ((val2 & ~pVM->hm.s.vmx.msr.vmx_exit.n.allowed1) != 0)
2895 Log(("Invalid VMX_VMCS_CTRL_EXIT_CONTROLS: one\n"));
2896 }
2897 fWasInLongMode = CPUMIsGuestInLongModeEx(pCtx);
2898#endif /* VBOX_STRICT */
2899
2900#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2901 pVCpu->hm.s.vmx.VMCSCache.u64TimeEntry = RTTimeNanoTS();
2902#endif
2903
2904 /*
2905 * We can jump to this point to resume execution after determining that a VM-exit is innocent.
2906 */
2907ResumeExecution:
2908 if (!STAM_REL_PROFILE_ADV_IS_RUNNING(&pVCpu->hm.s.StatEntry))
2909 STAM_REL_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit2, &pVCpu->hm.s.StatEntry, x);
2910 AssertMsg(pVCpu->hm.s.idEnteredCpu == RTMpCpuId(),
2911 ("Expected %d, I'm %d; cResume=%d exitReason=%RGv exitQualification=%RGv\n",
2912 (int)pVCpu->hm.s.idEnteredCpu, (int)RTMpCpuId(), cResume, exitReason, exitQualification));
2913 Assert(!HMR0SuspendPending());
2914 /* Not allowed to switch modes without reloading the host state (32->64 switcher)!! */
2915 Assert(fWasInLongMode == CPUMIsGuestInLongModeEx(pCtx));
2916
2917 /*
2918 * Safety precaution; looping for too long here can have a very bad effect on the host.
2919 */
2920 if (RT_UNLIKELY(++cResume > pVM->hm.s.cMaxResumeLoops))
2921 {
2922 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
2923 rc = VINF_EM_RAW_INTERRUPT;
2924 goto end;
2925 }
2926
2927 /*
2928 * Check for IRQ inhibition due to instruction fusing (sti, mov ss).
2929 */
2930 if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2931 {
2932 Log(("VM_FF_INHIBIT_INTERRUPTS at %RGv successor %RGv\n", (RTGCPTR)pCtx->rip, EMGetInhibitInterruptsPC(pVCpu)));
2933 if (pCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
2934 {
2935 /*
2936              * Note: we intentionally don't clear VM_FF_INHIBIT_INTERRUPTS here.
2937              * Before we are able to execute this instruction in raw mode (iret to guest code), an external interrupt might
2938              * force a world switch again, possibly allowing a guest interrupt to be dispatched in the process. This could
2939              * break the guest. It sounds very unlikely, but such timing-sensitive problems are not as rare as you might think.
2940 */
2941 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2942 /* Irq inhibition is no longer active; clear the corresponding VMX state. */
2943 rc2 = VMXWriteVmcs(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, 0);
2944 AssertRC(rc2);
2945 }
2946 }
2947 else
2948 {
2949 /* Irq inhibition is no longer active; clear the corresponding VMX state. */
2950 rc2 = VMXWriteVmcs(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, 0);
2951 AssertRC(rc2);
2952 }
2953
2954#ifdef VBOX_HIGH_RES_TIMERS_HACK_IN_RING0
2955 if (RT_UNLIKELY((cResume & 0xf) == 0))
2956 {
2957 uint64_t u64CurTime = RTTimeMilliTS();
2958
2959 if (RT_UNLIKELY(u64CurTime > u64LastTime))
2960 {
2961 u64LastTime = u64CurTime;
2962 TMTimerPollVoid(pVM, pVCpu);
2963 }
2964 }
2965#endif
2966
2967 /*
2968 * Check for pending actions that force us to go back to ring-3.
2969 */
2970 if ( VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK | VM_FF_REQUEST | VM_FF_PGM_POOL_FLUSH_PENDING | VM_FF_PDM_DMA)
2971 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK | VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL | VMCPU_FF_REQUEST))
2972 {
2973 /* Check if a sync operation is pending. */
2974 if (VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
2975 {
2976 rc = PGMSyncCR3(pVCpu, pCtx->cr0, pCtx->cr3, pCtx->cr4, VMCPU_FF_ISSET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
2977 if (rc != VINF_SUCCESS)
2978 {
2979 AssertRC(VBOXSTRICTRC_VAL(rc));
2980 Log(("Pending pool sync is forcing us back to ring 3; rc=%d\n", VBOXSTRICTRC_VAL(rc)));
2981 goto end;
2982 }
2983 }
2984
2985#ifdef DEBUG
2986 /* Intercept X86_XCPT_DB if stepping is enabled */
2987 if (!DBGFIsStepping(pVCpu))
2988#endif
2989 {
2990 if ( VM_FF_ISPENDING(pVM, VM_FF_HM_TO_R3_MASK)
2991 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
2992 {
2993 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchToR3);
2994 rc = RT_UNLIKELY(VM_FF_ISPENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
2995 goto end;
2996 }
2997 }
2998
2999 /* Pending request packets might contain actions that need immediate attention, such as pending hardware interrupts. */
3000 if ( VM_FF_ISPENDING(pVM, VM_FF_REQUEST)
3001 || VMCPU_FF_ISPENDING(pVCpu, VMCPU_FF_REQUEST))
3002 {
3003 rc = VINF_EM_PENDING_REQUEST;
3004 goto end;
3005 }
3006
3007 /* Check if a pgm pool flush is in progress. */
3008 if (VM_FF_ISPENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
3009 {
3010 rc = VINF_PGM_POOL_FLUSH_PENDING;
3011 goto end;
3012 }
3013
3014 /* Check if DMA work is pending (2nd+ run). */
3015 if (VM_FF_ISPENDING(pVM, VM_FF_PDM_DMA) && cResume > 1)
3016 {
3017 rc = VINF_EM_RAW_TO_R3;
3018 goto end;
3019 }
3020 }
3021
3022#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
3023 /*
3024     * Exit to ring-3 if preemption or other work is pending.
3025 *
3026 * Interrupts are disabled before the call to make sure we don't miss any interrupt
3027 * that would flag preemption (IPI, timer tick, ++). (Would've been nice to do this
3028 * further down, but hmR0VmxCheckPendingInterrupt makes that impossible.)
3029 *
3030     * Note! Interrupts must be disabled *before* we check for TLB flushes; TLB
3031 * shootdowns rely on this.
3032 */
3033 uOldEFlags = ASMIntDisableFlags();
3034 if (RTThreadPreemptIsPending(NIL_RTTHREAD))
3035 {
3036 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptPending);
3037 rc = VINF_EM_RAW_INTERRUPT;
3038 goto end;
3039 }
3040 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
3041#endif
3042
3043 /*
3044 * When external interrupts are pending, we should exit the VM when IF is set.
3045 * Note: *After* VM_FF_INHIBIT_INTERRUPTS check!
3046 */
3047 rc = hmR0VmxCheckPendingInterrupt(pVM, pVCpu, pCtx);
3048 if (RT_FAILURE(rc))
3049 goto end;
3050
3051 /** @todo check timers?? */
3052
3053 /*
3054 * TPR caching using CR8 is only available in 64-bit mode.
3055     * Note: AMD makes a 32-bit exception (X86_CPUID_AMD_FEATURE_ECX_CR8L), but this appears to be missing on Intel CPUs.
3056 * Note: We can't do this in LoadGuestState() as PDMApicGetTPR can jump back to ring-3 (lock)!! (no longer true) .
3057 */
3058     /** @todo query and update the TPR only when it could have been changed (MMIO
3059      * access & wrmsr (x2apic)). */
3060 if (fSetupTPRCaching)
3061 {
3062 /* TPR caching in CR8 */
3063 bool fPending;
3064
3065 rc2 = PDMApicGetTPR(pVCpu, &u8LastTPR, &fPending);
3066 AssertRC(rc2);
3067 /* The TPR can be found at offset 0x80 in the APIC mmio page. */
3068 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8LastTPR;
3069
3070 /*
3071 * Two options here:
3072 * - external interrupt pending, but masked by the TPR value.
3073      *   -> a CR8 update that lowers the current TPR value should cause an exit.
3074      * - no pending interrupts
3075      *   -> We don't need to be explicitly notified. There are enough world switches for detecting pending interrupts.
3076 */
3077
3078 /* cr8 bits 3-0 correspond to bits 7-4 of the task priority mmio register. */
3079 rc = VMXWriteVmcs(VMX_VMCS32_CTRL_TPR_THRESHOLD, (fPending) ? (u8LastTPR >> 4) : 0);
3080 AssertRC(VBOXSTRICTRC_VAL(rc));
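            /* Example: a guest TPR of 0x50 with an interrupt pending programs a threshold of 5, so a
               CR8 write that drops the TPR below that level causes a VM-exit; with nothing pending the
               threshold is simply left at 0. */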
3081
3082 if (pVM->hm.s.fTPRPatchingActive)
3083 {
3084 Assert(!CPUMIsGuestInLongModeEx(pCtx));
3085 /* Our patch code uses LSTAR for TPR caching. */
3086 pCtx->msrLSTAR = u8LastTPR;
3087
3088 /** @todo r=ramshankar: we should check for MSR-bitmap support here. */
3089 if (fPending)
3090 {
3091 /* A TPR change could activate a pending interrupt, so catch lstar writes. */
3092 hmR0VmxSetMSRPermission(pVCpu, MSR_K8_LSTAR, true, false);
3093 }
3094 else
3095 {
3096 /*
3097                 * No interrupts are pending, so we don't need to be explicitly notified.
3098 * There are enough world switches for detecting pending interrupts.
3099 */
3100 hmR0VmxSetMSRPermission(pVCpu, MSR_K8_LSTAR, true, true);
3101 }
3102 }
3103 }
3104
3105#ifdef LOG_ENABLED
3106 if ( pVM->hm.s.fNestedPaging
3107 || pVM->hm.s.vmx.fVpid)
3108 {
3109 PHMGLOBLCPUINFO pCpu = HMR0GetCurrentCpu();
3110 if (pVCpu->hm.s.idLastCpu != pCpu->idCpu)
3111 {
3112 LogFlow(("Force TLB flush due to rescheduling to a different cpu (%d vs %d)\n", pVCpu->hm.s.idLastCpu,
3113 pCpu->idCpu));
3114 }
3115 else if (pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
3116 {
3117 LogFlow(("Force TLB flush due to changed TLB flush count (%x vs %x)\n", pVCpu->hm.s.cTlbFlushes,
3118 pCpu->cTlbFlushes));
3119 }
3120 else if (VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH))
3121 LogFlow(("Manual TLB flush\n"));
3122 }
3123#endif
3124#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
3125 PGMRZDynMapFlushAutoSet(pVCpu);
3126#endif
3127
3128 /*
3129 * NOTE: DO NOT DO ANYTHING AFTER THIS POINT THAT MIGHT JUMP BACK TO RING-3!
3130 * (until the actual world switch)
3131 */
3132#ifdef VBOX_STRICT
3133 idCpuCheck = RTMpCpuId();
3134#endif
3135#ifdef LOG_ENABLED
3136 VMMR0LogFlushDisable(pVCpu);
3137#endif
3138
3139 /*
3140 * Save the host state first.
3141 */
3142 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT)
3143 {
3144 rc = VMXR0SaveHostState(pVM, pVCpu);
3145 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3146 {
3147 VMMR0LogFlushEnable(pVCpu);
3148 goto end;
3149 }
3150 }
3151
3152 /*
3153 * Load the guest state.
3154 */
3155 if (!pVCpu->hm.s.fContextUseFlags)
3156 {
3157 VMXR0LoadMinimalGuestState(pVM, pVCpu, pCtx);
3158 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
3159 }
3160 else
3161 {
3162 rc = VMXR0LoadGuestState(pVM, pVCpu, pCtx);
3163 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3164 {
3165 VMMR0LogFlushEnable(pVCpu);
3166 goto end;
3167 }
3168 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
3169 }
3170
3171#ifndef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
3172 /*
3173 * Disable interrupts to make sure a poke will interrupt execution.
3174 * This must be done *before* we check for TLB flushes; TLB shootdowns rely on this.
3175 */
3176 uOldEFlags = ASMIntDisableFlags();
3177 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC);
3178#endif
3179
3180 /* Non-register state Guest Context */
3181 /** @todo change me according to cpu state */
3182 rc2 = VMXWriteVmcs(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
3183 AssertRC(rc2);
3184
3185 /* Set TLB flush state as checked until we return from the world switch. */
3186 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true);
3187 /* Deal with tagged TLB setup and invalidation. */
3188 pVM->hm.s.vmx.pfnFlushTaggedTlb(pVM, pVCpu);
3189
3190 /*
3191 * Manual save and restore:
3192 * - General purpose registers except RIP, RSP
3193 *
3194 * Trashed:
3195 * - CR2 (we don't care)
3196 * - LDTR (reset to 0)
3197 * - DRx (presumably not changed at all)
3198 * - DR7 (reset to 0x400)
3199 * - EFLAGS (reset to RT_BIT(1); not relevant)
3200 */
3201
3202 /* All done! Let's start VM execution. */
3203 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
3204 Assert(idCpuCheck == RTMpCpuId());
3205
3206#ifdef VBOX_WITH_CRASHDUMP_MAGIC
3207 pVCpu->hm.s.vmx.VMCSCache.cResume = cResume;
3208 pVCpu->hm.s.vmx.VMCSCache.u64TimeSwitch = RTTimeNanoTS();
3209#endif
3210
3211 /*
3212 * Save the current TPR value in the LSTAR MSR so our patches can access it.
3213 */
3214 if (pVM->hm.s.fTPRPatchingActive)
3215 {
3216 Assert(pVM->hm.s.fTPRPatchingActive);
3217 u64OldLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3218 ASMWrMsr(MSR_K8_LSTAR, u8LastTPR);
3219 }
3220
3221 TMNotifyStartOfExecution(pVCpu);
3222
3223#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
3224 /*
3225      * Save the current host TSC_AUX and write the guest TSC_AUX to the host, so that
3226      * RDTSCPs (that don't cause exits) read the guest MSR. See @bugref{3324}.
3227 */
3228 if ( (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
3229 && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
3230 {
3231 pVCpu->hm.s.u64HostTSCAux = ASMRdMsr(MSR_K8_TSC_AUX);
3232 uint64_t u64GuestTSCAux = 0;
3233 rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTSCAux);
3234 AssertRC(rc2);
3235 ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTSCAux);
3236 }
3237#endif
3238
3239#ifdef VBOX_WITH_KERNEL_USING_XMM
3240 rc = hmR0VMXStartVMWrapXMM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
3241#else
3242 rc = pVCpu->hm.s.vmx.pfnStartVM(pVCpu->hm.s.fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
3243#endif
3244 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false);
3245 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits);
3246
3247 /* Possibly the last TSC value seen by the guest (too high); only when we're in TSC offset mode. */
3248 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_RDTSC_EXIT))
3249 {
3250#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
3251 /* Restore host's TSC_AUX. */
3252 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
3253 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTSCAux);
3254#endif
3255
3256 TMCpuTickSetLastSeen(pVCpu,
3257 ASMReadTSC() + pVCpu->hm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
3258 }
3259
3260 TMNotifyEndOfExecution(pVCpu);
3261 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);
3262 Assert(!(ASMGetFlags() & X86_EFL_IF));
3263
3264 /*
3265 * Restore the host LSTAR MSR if the guest could have changed it.
3266 */
3267 if (pVM->hm.s.fTPRPatchingActive)
3268 {
3269 Assert(pVM->hm.s.fTPRPatchingActive);
3270 pVCpu->hm.s.vmx.pbVirtApic[0x80] = pCtx->msrLSTAR = ASMRdMsr(MSR_K8_LSTAR);
3271 ASMWrMsr(MSR_K8_LSTAR, u64OldLSTAR);
3272 }
3273
3274 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
3275 ASMSetFlags(uOldEFlags);
3276#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
3277 uOldEFlags = ~(RTCCUINTREG)0;
3278#endif
3279
3280 AssertMsg(!pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries, ("pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries=%d\n",
3281 pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries));
3282
3283 /* In case we execute a goto ResumeExecution later on. */
3284 pVCpu->hm.s.fResumeVM = true;
3285 pVCpu->hm.s.fForceTLBFlush = false;
3286
3287 /*
3288 * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
3289 * IMPORTANT: WE CAN'T DO ANY LOGGING OR OPERATIONS THAT CAN DO A LONGJMP BACK TO RING 3 *BEFORE* WE'VE SYNCED BACK (MOST OF) THE GUEST STATE
3290 * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
3291 */
3292
3293 if (RT_UNLIKELY(rc != VINF_SUCCESS))
3294 {
3295 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx);
3296 VMMR0LogFlushEnable(pVCpu);
3297 goto end;
3298 }
3299
3300 /* Success. Query the guest state and figure out what has happened. */
3301
3302 /* Investigate why there was a VM-exit. */
3303 rc2 = VMXReadCachedVmcs(VMX_VMCS32_RO_EXIT_REASON, &exitReason);
3304 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[exitReason & MASK_EXITREASON_STAT]);
3305
3306 exitReason &= 0xffff; /* Bits 0-15 contain the exit code. */
3307 rc2 |= VMXReadCachedVmcs(VMX_VMCS32_RO_VM_INSTR_ERROR, &instrError);
3308 rc2 |= VMXReadCachedVmcs(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &cbInstr);
3309 rc2 |= VMXReadCachedVmcs(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &intInfo);
3310 /* might not be valid; depends on VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID. */
3311 rc2 |= VMXReadCachedVmcs(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERRCODE, &errCode);
3312 rc2 |= VMXReadCachedVmcs(VMX_VMCS32_RO_EXIT_INSTR_INFO, &instrInfo);
3313 rc2 |= VMXReadCachedVmcs(VMX_VMCS_RO_EXIT_QUALIFICATION, &exitQualification);
3314 AssertRC(rc2);
3315
3316 /*
3317 * Sync back the guest state.
3318 */
3319 rc2 = VMXR0SaveGuestState(pVM, pVCpu, pCtx);
3320 AssertRC(rc2);
3321
3322 /* Note! NOW IT'S SAFE FOR LOGGING! */
3323 VMMR0LogFlushEnable(pVCpu);
3324 Log2(("Raw exit reason %08x\n", exitReason));
3325#if ARCH_BITS == 64 /* for the time being */
3326 VBOXVMM_R0_HMVMX_VMEXIT(pVCpu, pCtx, exitReason);
3327#endif
3328
3329 /*
3330 * Check if an injected event was interrupted prematurely.
3331 */
3332 rc2 = VMXReadCachedVmcs(VMX_VMCS32_RO_IDT_INFO, &val);
3333 AssertRC(rc2);
3334 pVCpu->hm.s.Event.u64IntrInfo = VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(val);
3335 if ( VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hm.s.Event.u64IntrInfo)
3336 /* Ignore 'int xx' as they'll be restarted anyway. */
3337 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT
3338 /* Ignore software exceptions (such as int3) as they'll reoccur when we restart the instruction anyway. */
3339 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo) != VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
3340 {
3341 Assert(!pVCpu->hm.s.Event.fPending);
3342 pVCpu->hm.s.Event.fPending = true;
3343 /* Error code present? */
3344 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo))
3345 {
3346 rc2 = VMXReadCachedVmcs(VMX_VMCS32_RO_IDT_ERRCODE, &val);
3347 AssertRC(rc2);
3348 pVCpu->hm.s.Event.u32ErrCode = val;
3349 Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv pending error=%RX64\n",
3350 pVCpu->hm.s.Event.u64IntrInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification, val));
3351 }
3352 else
3353 {
3354 Log(("Pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n", pVCpu->hm.s.Event.u64IntrInfo,
3355 (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));
3356 pVCpu->hm.s.Event.u32ErrCode = 0;
3357 }
3358 }
3359#ifdef VBOX_STRICT
3360 else if ( VMX_EXIT_INTERRUPTION_INFO_VALID(pVCpu->hm.s.Event.u64IntrInfo)
3361 /* Ignore software exceptions (such as int3) as they'll reoccur when we restart the instruction anyway. */
3362 && VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
3363 {
3364 Log(("Ignore pending inject %RX64 at %RGv exit=%08x intInfo=%08x exitQualification=%RGv\n",
3365 pVCpu->hm.s.Event.u64IntrInfo, (RTGCPTR)pCtx->rip, exitReason, intInfo, exitQualification));
3366 }
3367
3368 if (exitReason == VMX_EXIT_ERR_INVALID_GUEST_STATE)
3369 HMDumpRegs(pVM, pVCpu, pCtx);
3370#endif
3371
3372 Log2(("E%d: New EIP=%x:%RGv\n", (uint32_t)exitReason, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
3373 Log2(("Exit reason %d, exitQualification %RGv\n", (uint32_t)exitReason, exitQualification));
3374 Log2(("instrInfo=%d instrError=%d instr length=%d\n", (uint32_t)instrInfo, (uint32_t)instrError, (uint32_t)cbInstr));
3375 Log2(("Interruption error code %d\n", (uint32_t)errCode));
3376 Log2(("IntInfo = %08x\n", (uint32_t)intInfo));
3377
3378 /*
3379 * Sync back the TPR if it was changed.
3380 */
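    /* Offset 0x80 in the virtual APIC page is the TPR register; u8LastTPR holds the value it had at VM-entry. */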
3381 if ( fSetupTPRCaching
3382 && u8LastTPR != pVCpu->hm.s.vmx.pbVirtApic[0x80])
3383 {
3384 rc2 = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
3385 AssertRC(rc2);
3386 }
3387
3388#ifdef DBGFTRACE_ENABLED /** @todo DTrace later. */
3389 RTTraceBufAddMsgF(pVM->CTX_SUFF(hTraceBuf), "vmexit %08x %016RX64 at %04:%08RX64 %RX64",
3390 exitReason, (uint64_t)exitQualification, pCtx->cs.Sel, pCtx->rip, (uint64_t)intInfo);
3391#endif
3392 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
3393
3394 /* Some cases don't need a complete resync of the guest CPU state; handle them here. */
3395 Assert(rc == VINF_SUCCESS); /* might consider VERR_IPE_UNINITIALIZED_STATUS here later... */
3396 switch (exitReason)
3397 {
3398 case VMX_EXIT_XCPT_NMI: /* 0 Exception or non-maskable interrupt (NMI). */
3399 case VMX_EXIT_EXT_INT: /* 1 External interrupt. */
3400 {
3401 uint32_t vector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(intInfo);
3402
3403 if (!VMX_EXIT_INTERRUPTION_INFO_VALID(intInfo))
3404 {
3405 Assert(exitReason == VMX_EXIT_EXT_INT);
3406 /* External interrupt; leave to allow it to be dispatched again. */
3407 rc = VINF_EM_RAW_INTERRUPT;
3408 break;
3409 }
3410 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExit2Sub3, y3);
3411 switch (VMX_EXIT_INTERRUPTION_INFO_TYPE(intInfo))
3412 {
3413 case VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI: /* Non-maskable interrupt. */
3414 /* External interrupt; leave to allow it to be dispatched again. */
3415 rc = VINF_EM_RAW_INTERRUPT;
3416 break;
3417
3418 case VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT: /* External hardware interrupt. */
3419 AssertFailed(); /* can't come here; fails the first check. */
3420 break;
3421
3422 case VMX_EXIT_INTERRUPTION_INFO_TYPE_DB_XCPT: /* Unknown why we get this type for #DB */
3423 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
3424 Assert(vector == 1 || vector == 3 || vector == 4);
3425 /* no break */
3426 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT: /* Hardware exception. */
3427 Log2(("Hardware/software interrupt %d\n", vector));
3428 switch (vector)
3429 {
3430 case X86_XCPT_NM:
3431 {
3432 Log(("#NM fault at %RGv error code %x\n", (RTGCPTR)pCtx->rip, errCode));
3433
3434 /** @todo don't intercept #NM exceptions anymore when we've activated the guest FPU state. */
3435 /* If we sync the FPU/XMM state on-demand, then we can continue execution as if nothing has happened. */
3436 rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pCtx);
3437 if (rc == VINF_SUCCESS)
3438 {
3439 Assert(CPUMIsGuestFPUStateActive(pVCpu));
3440
3441 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
3442
3443 /* Continue execution. */
3444 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
3445
3446 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
3447 goto ResumeExecution;
3448 }
3449
3450 Log(("Forward #NM fault to the guest\n"));
3451 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
3452 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
3453 cbInstr, 0);
3454 AssertRC(rc2);
3455 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
3456 goto ResumeExecution;
3457 }
3458
3459 case X86_XCPT_PF: /* Page fault */
3460 {
3461#ifdef VBOX_ALWAYS_TRAP_PF
3462 if (pVM->hm.s.fNestedPaging)
3463 {
3464 /*
3465 * A genuine pagefault. Forward the trap to the guest by injecting the exception and resuming execution.
3466 */
3467 Log(("Guest page fault at %RGv cr2=%RGv error code %RGv rsp=%RGv\n", (RTGCPTR)pCtx->rip, exitQualification,
3468 errCode, (RTGCPTR)pCtx->rsp));
3469
3470 Assert(CPUMIsGuestInPagedProtectedModeEx(pCtx));
3471
3472 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
3473
3474 /* Now we must update CR2. */
3475 pCtx->cr2 = exitQualification;
3476 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
3477 cbInstr, errCode);
3478 AssertRC(rc2);
3479
3480 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
3481 goto ResumeExecution;
3482 }
3483#else
3484 Assert(!pVM->hm.s.fNestedPaging);
3485#endif
3486
3487#ifdef VBOX_HM_WITH_GUEST_PATCHING
3488 /* Shortcut for APIC TPR reads and writes; 32-bit guests only. */
3489 if ( pVM->hm.s.fTRPPatchingAllowed
3490 && pVM->hm.s.pGuestPatchMem
3491 && (exitQualification & 0xfff) == 0x080
3492 && !(errCode & X86_TRAP_PF_P) /* not present */
3493 && CPUMGetGuestCPL(pVCpu) == 0
3494 && !CPUMIsGuestInLongModeEx(pCtx)
3495 && pVM->hm.s.cPatches < RT_ELEMENTS(pVM->hm.s.aPatches))
3496 {
3497 RTGCPHYS GCPhysApicBase, GCPhys;
3498 GCPhysApicBase = pCtx->msrApicBase;
3499 GCPhysApicBase &= PAGE_BASE_GC_MASK;
3500
3501 rc = PGMGstGetPage(pVCpu, (RTGCPTR)exitQualification, NULL, &GCPhys);
3502 if ( rc == VINF_SUCCESS
3503 && GCPhys == GCPhysApicBase)
3504 {
3505 /* Only attempt to patch the instruction once. */
3506 PHMTPRPATCH pPatch = (PHMTPRPATCH)RTAvloU32Get(&pVM->hm.s.PatchTree, (AVLOU32KEY)pCtx->eip);
3507 if (!pPatch)
3508 {
3509 rc = VINF_EM_HM_PATCH_TPR_INSTR;
3510 break;
3511 }
3512 }
3513 }
3514#endif
3515
3516 Log2(("Page fault at %RGv error code %x\n", exitQualification, errCode));
3517 /* Exit qualification contains the linear address of the page fault. */
3518 TRPMAssertTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP);
3519 TRPMSetErrorCode(pVCpu, errCode);
3520 TRPMSetFaultAddress(pVCpu, exitQualification);
3521
3522 /* Shortcut for APIC TPR reads and writes. */
3523 if ( (exitQualification & 0xfff) == 0x080
3524 && !(errCode & X86_TRAP_PF_P) /* not present */
3525 && fSetupTPRCaching
3526 && (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
3527 {
3528 RTGCPHYS GCPhysApicBase, GCPhys;
3529 GCPhysApicBase = pCtx->msrApicBase;
3530 GCPhysApicBase &= PAGE_BASE_GC_MASK;
3531
3532 rc = PGMGstGetPage(pVCpu, (RTGCPTR)exitQualification, NULL, &GCPhys);
3533 if ( rc == VINF_SUCCESS
3534 && GCPhys == GCPhysApicBase)
3535 {
3536 Log(("Enable VT-x virtual APIC access filtering\n"));
3537 rc2 = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess,
3538 X86_PTE_RW | X86_PTE_P);
3539 AssertRC(rc2);
3540 }
3541 }
3542
3543 /* Forward it to our trap handler first, in case our shadow pages are out of sync. */
3544 rc = PGMTrap0eHandler(pVCpu, errCode, CPUMCTX2CORE(pCtx), (RTGCPTR)exitQualification);
3545 Log2(("PGMTrap0eHandler %RGv returned %Rrc\n", (RTGCPTR)pCtx->rip, VBOXSTRICTRC_VAL(rc)));
3546
3547 if (rc == VINF_SUCCESS)
3548 { /* We've successfully synced our shadow pages, so let's just continue execution. */
3549 Log2(("Shadow page fault at %RGv cr2=%RGv error code %x\n", (RTGCPTR)pCtx->rip, exitQualification, errCode));
3550 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
3551
3552 TRPMResetTrap(pVCpu);
3553 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
3554 goto ResumeExecution;
3555 }
3556 else if (rc == VINF_EM_RAW_GUEST_TRAP)
3557 {
3558 /*
3559 * A genuine pagefault. Forward the trap to the guest by injecting the exception and resuming execution.
3560 */
3561 Log2(("Forward page fault to the guest\n"));
3562
3563 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
3564 /* The error code might have been changed. */
3565 errCode = TRPMGetErrorCode(pVCpu);
3566
3567 TRPMResetTrap(pVCpu);
3568
3569 /* Now we must update CR2. */
3570 pCtx->cr2 = exitQualification;
3571 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
3572 cbInstr, errCode);
3573 AssertRC(rc2);
3574
3575 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
3576 goto ResumeExecution;
3577 }
3578#ifdef VBOX_STRICT
3579 if (rc != VINF_EM_RAW_EMULATE_INSTR && rc != VINF_EM_RAW_EMULATE_IO_BLOCK)
3580 Log2(("PGMTrap0eHandler failed with %d\n", VBOXSTRICTRC_VAL(rc)));
3581#endif
3582 /* Need to go back to the recompiler to emulate the instruction. */
3583 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
3584 TRPMResetTrap(pVCpu);
3585
3586 /* If event delivery caused the #PF (shadow or not), tell TRPM. */
3587 hmR0VmxCheckPendingEvent(pVCpu);
3588 break;
3589 }
3590
3591 case X86_XCPT_MF: /* Floating point exception. */
3592 {
3593 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
3594 if (!(pCtx->cr0 & X86_CR0_NE))
3595 {
3596 /* Old-style FPU error reporting needs some extra work. */
3597 /** @todo don't fall back to the recompiler, but do it manually. */
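                    /* VT-x forces CR0.NE to 1, so the CPU raises #MF even though the guest (with NE=0) expects
                       legacy FERR#/IRQ13 error reporting; emulating that is left to the recompiler for now. */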
3598 rc = VINF_EM_RAW_EMULATE_INSTR;
3599 break;
3600 }
3601 Log(("Trap %x at %04X:%RGv\n", vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
3602 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
3603 cbInstr, errCode);
3604 AssertRC(rc2);
3605
3606 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
3607 goto ResumeExecution;
3608 }
3609
3610 case X86_XCPT_DB: /* Debug exception. */
3611 {
3612 uint64_t uDR6;
3613
3614 /*
3615 * DR6, DR7.GD and IA32_DEBUGCTL.LBR are not updated yet.
3616 *
3617 * Exit qualification bits:
3618 * 3:0 B0-B3 which breakpoint condition was met
3619 * 12:4 Reserved (0)
3620 * 13 BD - debug register access detected
3621 * 14 BS - single step execution or branch taken
3622 * 63:15 Reserved (0)
3623 */
3624 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
3625
3626 /* Note that we don't support guest and host-initiated debugging at the same time. */
3627
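                /* The exit qualification bits above occupy the same positions as the corresponding DR6 bits,
                   so they can be OR'ed straight into the architectural DR6 init value. */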
3628 uDR6 = X86_DR6_INIT_VAL;
3629 uDR6 |= (exitQualification & (X86_DR6_B0|X86_DR6_B1|X86_DR6_B2|X86_DR6_B3|X86_DR6_BD|X86_DR6_BS));
3630 rc = DBGFRZTrap01Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx), uDR6);
3631 if (rc == VINF_EM_RAW_GUEST_TRAP)
3632 {
3633 /* Update DR6 here. */
3634 pCtx->dr[6] = uDR6;
3635
3636 /* Resync DR6 if the debug state is active. */
3637 if (CPUMIsGuestDebugStateActive(pVCpu))
3638 ASMSetDR6(pCtx->dr[6]);
3639
3640 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
3641 pCtx->dr[7] &= ~X86_DR7_GD;
3642
3643 /* Paranoia. */
3644 pCtx->dr[7] &= 0xffffffff; /* upper 32 bits reserved */
3645 pCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* must be zero */
3646 pCtx->dr[7] |= 0x400; /* must be one */
3647
3648 /* Resync DR7 */
3649 rc2 = VMXWriteVmcs64(VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
3650 AssertRC(rc2);
3651
3652 Log(("Trap %x (debug) at %RGv exit qualification %RX64 dr6=%x dr7=%x\n", vector, (RTGCPTR)pCtx->rip,
3653 exitQualification, (uint32_t)pCtx->dr[6], (uint32_t)pCtx->dr[7]));
3654 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
3655 cbInstr, errCode);
3656 AssertRC(rc2);
3657
3658 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
3659 goto ResumeExecution;
3660 }
3661 /* Return to ring 3 to deal with the debug exit code. */
3662 Log(("Debugger hardware BP at %04x:%RGv (rc=%Rrc)\n", pCtx->cs.Sel, pCtx->rip, VBOXSTRICTRC_VAL(rc)));
3663 break;
3664 }
3665
3666 case X86_XCPT_BP: /* Breakpoint. */
3667 {
3668 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
3669 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pCtx));
3670 if (rc == VINF_EM_RAW_GUEST_TRAP)
3671 {
3672 Log(("Guest #BP at %04x:%RGv\n", pCtx->cs.Sel, pCtx->rip));
3673 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
3674 cbInstr, errCode);
3675 AssertRC(rc2);
3676 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
3677 goto ResumeExecution;
3678 }
3679 if (rc == VINF_SUCCESS)
3680 {
3681 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
3682 goto ResumeExecution;
3683 }
3684 Log(("Debugger BP at %04x:%RGv (rc=%Rrc)\n", pCtx->cs.Sel, pCtx->rip, VBOXSTRICTRC_VAL(rc)));
3685 break;
3686 }
3687
3688 case X86_XCPT_GP: /* General protection failure exception. */
3689 {
3690 uint32_t cbOp;
3691 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
3692
3693 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
3694#ifdef VBOX_STRICT
3695 if ( !CPUMIsGuestInRealModeEx(pCtx)
3696 || !pVM->hm.s.vmx.pRealModeTSS)
3697 {
3698 Log(("Trap %x at %04X:%RGv errorCode=%RGv\n", vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, errCode));
3699 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
3700 cbInstr, errCode);
3701 AssertRC(rc2);
3702 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
3703 goto ResumeExecution;
3704 }
3705#endif
3706 Assert(CPUMIsGuestInRealModeEx(pCtx));
3707
3708 LogFlow(("Real mode X86_XCPT_GP instruction emulation at %x:%RGv\n", pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
3709
3710 rc2 = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
3711 if (RT_SUCCESS(rc2))
3712 {
3713 bool fUpdateRIP = true;
3714
3715 rc = VINF_SUCCESS;
3716 Assert(cbOp == pDis->cbInstr);
3717 switch (pDis->pCurInstr->uOpcode)
3718 {
3719 case OP_CLI:
3720 pCtx->eflags.Bits.u1IF = 0;
3721 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
3722 break;
3723
3724 case OP_STI:
3725 pCtx->eflags.Bits.u1IF = 1;
3726 EMSetInhibitInterruptsPC(pVCpu, pCtx->rip + pDis->cbInstr);
3727 Assert(VMCPU_FF_ISSET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
3728 rc2 = VMXWriteVmcs(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE,
3729 VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
3730 AssertRC(rc2);
3731 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
3732 break;
3733
3734 case OP_HLT:
3735 fUpdateRIP = false;
3736 rc = VINF_EM_HALT;
3737 pCtx->rip += pDis->cbInstr;
3738 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
3739 break;
3740
3741 case OP_POPF:
3742 {
3743 RTGCPTR GCPtrStack;
3744 uint32_t cbParm;
3745 uint32_t uMask;
3746 X86EFLAGS eflags;
3747
3748 if (pDis->fPrefix & DISPREFIX_OPSIZE)
3749 {
3750 cbParm = 4;
3751 uMask = 0xffffffff;
3752 }
3753 else
3754 {
3755 cbParm = 2;
3756 uMask = 0xffff;
3757 }
3758
3759 rc2 = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->esp & uMask, 0, &GCPtrStack);
3760 if (RT_FAILURE(rc2))
3761 {
3762 rc = VERR_EM_INTERPRETER;
3763 break;
3764 }
3765 eflags.u = 0;
3766 rc2 = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &eflags.u, cbParm);
3767 if (RT_FAILURE(rc2))
3768 {
3769 rc = VERR_EM_INTERPRETER;
3770 break;
3771 }
3772 LogFlow(("POPF %x -> %RGv mask=%x\n", eflags.u, pCtx->rsp, uMask));
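                            /* Merge only the POPF-modifiable flag bits within the operand-size mask; all other
                               EFLAGS bits keep their current values. */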
3773 pCtx->eflags.u = (pCtx->eflags.u & ~(X86_EFL_POPF_BITS & uMask))
3774 | (eflags.u & X86_EFL_POPF_BITS & uMask);
3775 /* RF cleared when popped in real mode; see pushf description in AMD manual. */
3776 pCtx->eflags.Bits.u1RF = 0;
3777 pCtx->esp += cbParm;
3778 pCtx->esp &= uMask;
3779
3780 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
3781 break;
3782 }
3783
3784 case OP_PUSHF:
3785 {
3786 RTGCPTR GCPtrStack;
3787 uint32_t cbParm;
3788 uint32_t uMask;
3789 X86EFLAGS eflags;
3790
3791 if (pDis->fPrefix & DISPREFIX_OPSIZE)
3792 {
3793 cbParm = 4;
3794 uMask = 0xffffffff;
3795 }
3796 else
3797 {
3798 cbParm = 2;
3799 uMask = 0xffff;
3800 }
3801
3802 rc2 = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), (pCtx->esp - cbParm) & uMask, 0,
3803 &GCPtrStack);
3804 if (RT_FAILURE(rc2))
3805 {
3806 rc = VERR_EM_INTERPRETER;
3807 break;
3808 }
3809 eflags = pCtx->eflags;
3810 /* RF & VM cleared when pushed in real mode; see pushf description in AMD manual. */
3811 eflags.Bits.u1RF = 0;
3812 eflags.Bits.u1VM = 0;
3813
3814 rc2 = PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &eflags.u, cbParm);
3815 if (RT_FAILURE(rc2))
3816 {
3817 rc = VERR_EM_INTERPRETER;
3818 break;
3819 }
3820 LogFlow(("PUSHF %x -> %RGv\n", eflags.u, GCPtrStack));
3821 pCtx->esp -= cbParm;
3822 pCtx->esp &= uMask;
3823 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
3824 break;
3825 }
3826
3827 case OP_IRET:
3828 {
3829 RTGCPTR GCPtrStack;
3830 uint32_t uMask = 0xffff;
3831 uint16_t aIretFrame[3];
3832
3833 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
3834 {
3835 rc = VERR_EM_INTERPRETER;
3836 break;
3837 }
3838
3839 rc2 = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pCtx), pCtx->esp & uMask, 0, &GCPtrStack);
3840 if (RT_FAILURE(rc2))
3841 {
3842 rc = VERR_EM_INTERPRETER;
3843 break;
3844 }
3845 rc2 = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame));
3846 if (RT_FAILURE(rc2))
3847 {
3848 rc = VERR_EM_INTERPRETER;
3849 break;
3850 }
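                            /* Real-mode IRET frame on the stack: [0]=IP, [1]=CS, [2]=FLAGS, 16 bits each. */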
3851 pCtx->ip = aIretFrame[0];
3852 pCtx->cs.Sel = aIretFrame[1];
3853 pCtx->cs.ValidSel = aIretFrame[1];
3854 pCtx->cs.u64Base = (uint32_t)pCtx->cs.Sel << 4;
3855 pCtx->eflags.u = (pCtx->eflags.u & ~(X86_EFL_POPF_BITS & uMask))
3856 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
3857 pCtx->sp += sizeof(aIretFrame);
3858
3859 LogFlow(("iret to %04x:%x\n", pCtx->cs.Sel, pCtx->ip));
3860 fUpdateRIP = false;
3861 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
3862 break;
3863 }
3864
3865 case OP_INT:
3866 {
3867 uint32_t intInfo2;
3868
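                            /* Build a VM-entry interruption-information value: vector in bits 7:0,
                               type (software interrupt) in bits 10:8, valid flag in bit 31. */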
3869 LogFlow(("Realmode: INT %x\n", pDis->Param1.uValue & 0xff));
3870 intInfo2 = pDis->Param1.uValue & 0xff;
3871 intInfo2 |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
3872 intInfo2 |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
3873
3874 rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, intInfo2, cbOp, 0);
3875 AssertRC(VBOXSTRICTRC_VAL(rc));
3876 fUpdateRIP = false;
3877 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
3878 break;
3879 }
3880
3881 case OP_INTO:
3882 {
3883 if (pCtx->eflags.Bits.u1OF)
3884 {
3885 uint32_t intInfo2;
3886
3887 LogFlow(("Realmode: INTO\n"));
3888 intInfo2 = X86_XCPT_OF;
3889 intInfo2 |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
3890 intInfo2 |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
3891
3892 rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, intInfo2, cbOp, 0);
3893 AssertRC(VBOXSTRICTRC_VAL(rc));
3894 fUpdateRIP = false;
3895 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
3896 }
3897 break;
3898 }
3899
3900 case OP_INT3:
3901 {
3902 uint32_t intInfo2;
3903
3904 LogFlow(("Realmode: INT 3\n"));
3905 intInfo2 = 3;
3906 intInfo2 |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
3907 intInfo2 |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
3908
3909 rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, intInfo2, cbOp, 0);
3910 AssertRC(VBOXSTRICTRC_VAL(rc));
3911 fUpdateRIP = false;
3912 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
3913 break;
3914 }
3915
3916 default:
3917 rc = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pCtx), 0, EMCODETYPE_SUPERVISOR);
3918 fUpdateRIP = false;
3919 break;
3920 }
3921
3922 if (rc == VINF_SUCCESS)
3923 {
3924 if (fUpdateRIP)
3925 pCtx->rip += cbOp; /* Move on to the next instruction. */
3926
3927 /*
3928 * LIDT and LGDT can end up here; in the future CRx changes may as well. Just reload the
3929 * whole context to be done with it.
3930 */
3931 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL;
3932
3933 /* Only resume if successful. */
3934 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
3935 goto ResumeExecution;
3936 }
3937 }
3938 else
3939 rc = VERR_EM_INTERPRETER;
3940
3941 AssertMsg(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
3942 ("Unexpected rc=%Rrc\n", VBOXSTRICTRC_VAL(rc)));
3943 break;
3944 }
3945
3946#ifdef VBOX_STRICT
3947 case X86_XCPT_XF: /* SIMD exception. */
3948 case X86_XCPT_DE: /* Divide error. */
3949 case X86_XCPT_UD: /* Unknown opcode exception. */
3950 case X86_XCPT_SS: /* Stack segment exception. */
3951 case X86_XCPT_NP: /* Segment not present exception. */
3952 {
3953 switch (vector)
3954 {
3955 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE); break;
3956 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD); break;
3957 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS); break;
3958 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP); break;
3959 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF); break;
3960 }
3961
3962 Log(("Trap %x at %04X:%RGv\n", vector, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
3963 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
3964 cbInstr, errCode);
3965 AssertRC(rc2);
3966
3967 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
3968 goto ResumeExecution;
3969 }
3970#endif
3971 default:
3972 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
3973 if ( CPUMIsGuestInRealModeEx(pCtx)
3974 && pVM->hm.s.vmx.pRealModeTSS)
3975 {
3976 Log(("Real Mode Trap %x at %04x:%04X error code %x\n", vector, pCtx->cs.Sel, pCtx->eip, errCode));
3977 rc = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
3978 cbInstr, errCode);
3979 AssertRC(VBOXSTRICTRC_VAL(rc)); /* Strict RC check below. */
3980
3981 /* Go back to ring-3 in case of a triple fault. */
3982 if ( vector == X86_XCPT_DF
3983 && rc == VINF_EM_RESET)
3984 {
3985 break;
3986 }
3987
3988 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
3989 goto ResumeExecution;
3990 }
3991 AssertMsgFailed(("Unexpected vm-exit caused by exception %x\n", vector));
3992 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
3993 break;
3994 } /* switch (vector) */
3995
3996 break;
3997
3998 default:
3999 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE;
4000 AssertMsgFailed(("Unexpected interruption code %x\n", intInfo));
4001 break;
4002 }
4003
4004 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub3, y3);
4005 break;
4006 }
4007
4008 /*
4009 * 48 EPT violation. An attempt to access memory with a guest-physical address was disallowed
4010 * by the configuration of the EPT paging structures.
4011 */
4012 case VMX_EXIT_EPT_VIOLATION:
4013 {
4014 RTGCPHYS GCPhys;
4015
4016 Assert(pVM->hm.s.fNestedPaging);
4017
4018 rc2 = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
4019 AssertRC(rc2);
4020 Assert(((exitQualification >> 7) & 3) != 2);
4021
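            /* Translate the EPT violation qualification into a #PF-style error code so the common PGM
               handlers below can be reused: instruction fetch -> ID, data write -> RW, entry present -> P. */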
4022 /* Determine the kind of violation. */
4023 errCode = 0;
4024 if (exitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
4025 errCode |= X86_TRAP_PF_ID;
4026
4027 if (exitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
4028 errCode |= X86_TRAP_PF_RW;
4029
4030 /* If the page is present, then it's a page level protection fault. */
4031 if (exitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
4032 errCode |= X86_TRAP_PF_P;
4033 else
4034 {
4035 /* Shortcut for APIC TPR reads and writes. */
4036 if ( (GCPhys & 0xfff) == 0x080
4037 && GCPhys > 0x1000000 /* to skip VGA frame buffer accesses */
4038 && fSetupTPRCaching
4039 && (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
4040 {
4041 RTGCPHYS GCPhysApicBase;
4042 GCPhysApicBase = pCtx->msrApicBase;
4043 GCPhysApicBase &= PAGE_BASE_GC_MASK;
4044 if (GCPhys == GCPhysApicBase + 0x80)
4045 {
4046 Log(("Enable VT-x virtual APIC access filtering\n"));
4047 rc2 = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess,
4048 X86_PTE_RW | X86_PTE_P);
4049 AssertRC(rc2);
4050 }
4051 }
4052 }
4053 Log(("EPT Page fault %x at %RGp error code %x\n", (uint32_t)exitQualification, GCPhys, errCode));
4054
4055 /* GCPhys contains the guest physical address of the page fault. */
4056 TRPMAssertTrap(pVCpu, X86_XCPT_PF, TRPM_TRAP);
4057 TRPMSetErrorCode(pVCpu, errCode);
4058 TRPMSetFaultAddress(pVCpu, GCPhys);
4059
4060 /* Handle the pagefault trap for the nested shadow table. */
4061 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, errCode, CPUMCTX2CORE(pCtx), GCPhys);
4062
4063 /*
4064 * Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment below, @bugref{6043}.
4065 */
4066 if ( rc == VINF_SUCCESS
4067 || rc == VERR_PAGE_TABLE_NOT_PRESENT
4068 || rc == VERR_PAGE_NOT_PRESENT)
4069 {
4070 /* We've successfully synced our shadow pages, so let's just continue execution. */
4071 Log2(("Shadow page fault at %RGv cr2=%RGp error code %x\n", (RTGCPTR)pCtx->rip, exitQualification, errCode));
4072 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
4073
4074 TRPMResetTrap(pVCpu);
4075 goto ResumeExecution;
4076 }
4077
4078#ifdef VBOX_STRICT
4079 if (rc != VINF_EM_RAW_EMULATE_INSTR)
4080 LogFlow(("PGMTrap0eHandlerNestedPaging at %RGv failed with %Rrc\n", (RTGCPTR)pCtx->rip, VBOXSTRICTRC_VAL(rc)));
4081#endif
4082 /* Need to go back to the recompiler to emulate the instruction. */
4083 TRPMResetTrap(pVCpu);
4084 break;
4085 }
4086
4087 case VMX_EXIT_EPT_MISCONFIG:
4088 {
4089 RTGCPHYS GCPhys;
4090
4091 Assert(pVM->hm.s.fNestedPaging);
4092
4093 rc2 = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
4094 AssertRC(rc2);
4095 Log(("VMX_EXIT_EPT_MISCONFIG for %RGp\n", GCPhys));
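            /* EPT misconfigurations are set up on purpose for MMIO pages so that accesses land here and can be
               dispatched quickly by PGM/IOM (see the PGM/MMIO optimizations referenced below). */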
4096
4097 /* Shortcut for APIC TPR reads and writes. */
4098 if ( (GCPhys & 0xfff) == 0x080
4099 && GCPhys > 0x1000000 /* to skip VGA frame buffer accesses */
4100 && fSetupTPRCaching
4101 && (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
4102 {
4103 RTGCPHYS GCPhysApicBase = pCtx->msrApicBase;
4104 GCPhysApicBase &= PAGE_BASE_GC_MASK;
4105 if (GCPhys == GCPhysApicBase + 0x80)
4106 {
4107 Log(("Enable VT-x virtual APIC access filtering\n"));
4108 rc2 = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess,
4109 X86_PTE_RW | X86_PTE_P);
4110 AssertRC(rc2);
4111 }
4112 }
4113
4114 rc = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pCtx), GCPhys, UINT32_MAX);
4115
4116 /*
4117 * If we succeed, resume execution.
4118 * Or, if we fail to interpret the instruction because we couldn't get the guest physical address
4119 * of the page containing the instruction via the guest's page tables (we would invalidate the guest page
4120 * in the host TLB), resume execution anyway; this causes a guest page fault and lets the guest handle this
4121 * weird case. See @bugref{6043}.
4122 */
4123 if ( rc == VINF_SUCCESS
4124 || rc == VERR_PAGE_TABLE_NOT_PRESENT
4125 || rc == VERR_PAGE_NOT_PRESENT)
4126 {
4127 Log2(("PGMR0Trap0eHandlerNPMisconfig(,,,%RGp) at %RGv -> resume\n", GCPhys, (RTGCPTR)pCtx->rip));
4128 goto ResumeExecution;
4129 }
4130
4131 Log2(("PGMR0Trap0eHandlerNPMisconfig(,,,%RGp) at %RGv -> %Rrc\n", GCPhys, (RTGCPTR)pCtx->rip, VBOXSTRICTRC_VAL(rc)));
4132 break;
4133 }
4134
4135 case VMX_EXIT_INT_WINDOW: /* 7 Interrupt window exiting. */
4136 /* Clear VM-exit on IF=1 change. */
4137 LogFlow(("VMX_EXIT_INT_WINDOW %RGv pending=%d IF=%d\n", (RTGCPTR)pCtx->rip,
4138 VMCPU_FF_ISPENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC|VMCPU_FF_INTERRUPT_PIC)), pCtx->eflags.Bits.u1IF));
4139 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_INT_WINDOW_EXIT;
4140 rc2 = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
4141 AssertRC(rc2);
4142 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
4143 goto ResumeExecution; /* we check for pending guest interrupts there */
4144
4145 case VMX_EXIT_WBINVD: /* 54 Guest software attempted to execute WBINVD. (conditional) */
4146 case VMX_EXIT_INVD: /* 13 Guest software attempted to execute INVD. (unconditional) */
4147 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
4148 /* Skip instruction and continue directly. */
4149 pCtx->rip += cbInstr;
4150 /* Continue execution. */
4151 goto ResumeExecution;
4152
4153 case VMX_EXIT_CPUID: /* 10 Guest software attempted to execute CPUID. */
4154 {
4155 Log2(("VMX: Cpuid %x\n", pCtx->eax));
4156 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
4157 rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pCtx));
4158 if (rc == VINF_SUCCESS)
4159 {
4160 /* Update EIP and continue execution. */
4161 Assert(cbInstr == 2);
4162 pCtx->rip += cbInstr;
4163 goto ResumeExecution;
4164 }
4165 AssertMsgFailed(("EMU: cpuid failed with %Rrc\n", VBOXSTRICTRC_VAL(rc)));
4166 rc = VINF_EM_RAW_EMULATE_INSTR;
4167 break;
4168 }
4169
4170 case VMX_EXIT_RDPMC: /* 15 Guest software attempted to execute RDPMC. */
4171 {
4172 Log2(("VMX: Rdpmc %x\n", pCtx->ecx));
4173 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
4174 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
4175 if (rc == VINF_SUCCESS)
4176 {
4177 /* Update EIP and continue execution. */
4178 Assert(cbInstr == 2);
4179 pCtx->rip += cbInstr;
4180 goto ResumeExecution;
4181 }
4182 rc = VINF_EM_RAW_EMULATE_INSTR;
4183 break;
4184 }
4185
4186 case VMX_EXIT_RDTSC: /* 16 Guest software attempted to execute RDTSC. */
4187 {
4188 Log2(("VMX: Rdtsc\n"));
4189 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
4190 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pCtx));
4191 if (rc == VINF_SUCCESS)
4192 {
4193 /* Update EIP and continue execution. */
4194 Assert(cbInstr == 2);
4195 pCtx->rip += cbInstr;
4196 goto ResumeExecution;
4197 }
4198 rc = VINF_EM_RAW_EMULATE_INSTR;
4199 break;
4200 }
4201
4202 case VMX_EXIT_RDTSCP: /* 51 Guest software attempted to execute RDTSCP. */
4203 {
4204 Log2(("VMX: Rdtscp\n"));
4205 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtscp);
4206 rc = EMInterpretRdtscp(pVM, pVCpu, pCtx);
4207 if (rc == VINF_SUCCESS)
4208 {
4209 /* Update EIP and continue execution. */
4210 Assert(cbInstr == 3);
4211 pCtx->rip += cbInstr;
4212 goto ResumeExecution;
4213 }
4214 rc = VINF_EM_RAW_EMULATE_INSTR;
4215 break;
4216 }
4217
4218 case VMX_EXIT_INVLPG: /* 14 Guest software attempted to execute INVLPG. */
4219 {
4220 Log2(("VMX: invlpg\n"));
4221 Assert(!pVM->hm.s.fNestedPaging);
4222
4223 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
4224 rc = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pCtx), exitQualification);
4225 if (rc == VINF_SUCCESS)
4226 {
4227 /* Update EIP and continue execution. */
4228 pCtx->rip += cbInstr;
4229 goto ResumeExecution;
4230 }
4231 AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: invlpg %RGv failed with %Rrc\n", exitQualification, VBOXSTRICTRC_VAL(rc)));
4232 break;
4233 }
4234
4235 case VMX_EXIT_MONITOR: /* 39 Guest software attempted to execute MONITOR. */
4236 {
4237 Log2(("VMX: monitor\n"));
4238
4239 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
4240 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pCtx));
4241 if (rc == VINF_SUCCESS)
4242 {
4243 /* Update EIP and continue execution. */
4244 pCtx->rip += cbInstr;
4245 goto ResumeExecution;
4246 }
4247 AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: monitor failed with %Rrc\n", VBOXSTRICTRC_VAL(rc)));
4248 break;
4249 }
4250
4251 case VMX_EXIT_WRMSR: /* 32 WRMSR. Guest software attempted to execute WRMSR. */
4252 /* When an interrupt is pending, we'll let MSR_K8_LSTAR writes fault in our TPR patch code. */
4253 if ( pVM->hm.s.fTPRPatchingActive
4254 && pCtx->ecx == MSR_K8_LSTAR)
4255 {
4256 Assert(!CPUMIsGuestInLongModeEx(pCtx));
4257 if ((pCtx->eax & 0xff) != u8LastTPR)
4258 {
4259 Log(("VMX: Faulting MSR_K8_LSTAR write with new TPR value %x\n", pCtx->eax & 0xff));
4260
4261 /* Our patch code uses LSTAR for TPR caching. */
4262 rc2 = PDMApicSetTPR(pVCpu, pCtx->eax & 0xff);
4263 AssertRC(rc2);
4264 }
4265
4266 /* Skip the instruction and continue. */
4267 pCtx->rip += cbInstr; /* wrmsr = [0F 30] */
4268
4269 /* Only resume if successful. */
4270 goto ResumeExecution;
4271 }
4272 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_MSR;
4273 /* no break */
4274 case VMX_EXIT_RDMSR: /* 31 RDMSR. Guest software attempted to execute RDMSR. */
4275 {
4276 STAM_COUNTER_INC((exitReason == VMX_EXIT_RDMSR) ? &pVCpu->hm.s.StatExitRdmsr : &pVCpu->hm.s.StatExitWrmsr);
4277
4278 /*
4279 * Note: The Intel spec. claims there's a REX version of RDMSR that's slightly different,
4280 * so we play it safe by completely disassembling the instruction.
4281 */
4282 Log2(("VMX: %s\n", (exitReason == VMX_EXIT_RDMSR) ? "rdmsr" : "wrmsr"));
4283 rc = EMInterpretInstruction(pVCpu, CPUMCTX2CORE(pCtx), 0);
4284 if (rc == VINF_SUCCESS)
4285 {
4286 /* EIP has been updated already. */
4287 /* Only resume if successful. */
4288 goto ResumeExecution;
4289 }
4290 AssertMsg(rc == VERR_EM_INTERPRETER, ("EMU: %s failed with %Rrc\n",
4291 (exitReason == VMX_EXIT_RDMSR) ? "rdmsr" : "wrmsr", VBOXSTRICTRC_VAL(rc)));
4292 break;
4293 }
4294
4295 case VMX_EXIT_CRX_MOVE: /* 28 Control-register accesses. */
4296 {
4297 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExit2Sub2, y2);
4298
4299 switch (VMX_EXIT_QUALIFICATION_CRX_ACCESS(exitQualification))
4300 {
4301 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE:
4302 {
4303 Log2(("VMX: %RGv mov cr%d, x\n", (RTGCPTR)pCtx->rip, VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)));
4304 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
4305 rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
4306 VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification),
4307 VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification));
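                    /* Mark the affected register(s) dirty so the next VM-entry reloads them into the VMCS
                       via VMXR0LoadGuestState. */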
4308 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification))
4309 {
4310 case 0:
4311 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0 | HM_CHANGED_GUEST_CR3;
4312 break;
4313 case 2:
4314 break;
4315 case 3:
4316 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestInPagedProtectedModeEx(pCtx));
4317 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
4318 break;
4319 case 4:
4320 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
4321 break;
4322 case 8:
4323 /* CR8 contains the APIC TPR */
4324 Assert(!(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1
4325 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
4326 break;
4327
4328 default:
4329 AssertFailed();
4330 break;
4331 }
4332 break;
4333 }
4334
4335 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ:
4336 {
4337 Log2(("VMX: mov x, crx\n"));
4338 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification)]);
4339
4340 Assert( !pVM->hm.s.fNestedPaging
4341 || !CPUMIsGuestInPagedProtectedModeEx(pCtx)
4342 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != DISCREG_CR3);
4343
4344 /* CR8 reads only cause an exit when the TPR shadow feature isn't present. */
4345 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification) != 8
4346 || !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_USE_TPR_SHADOW));
4347
4348 rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
4349 VMX_EXIT_QUALIFICATION_CRX_GENREG(exitQualification),
4350 VMX_EXIT_QUALIFICATION_CRX_REGISTER(exitQualification));
4351 break;
4352 }
4353
4354 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS:
4355 {
4356 Log2(("VMX: clts\n"));
4357 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
4358 rc = EMInterpretCLTS(pVM, pVCpu);
4359 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
4360 break;
4361 }
4362
4363 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW:
4364 {
4365 Log2(("VMX: lmsw %x\n", VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification)));
4366 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
4367 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(exitQualification));
4368 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
4369 break;
4370 }
4371 }
4372
4373 /* Update EIP if no error occurred. */
4374 if (RT_SUCCESS(rc))
4375 pCtx->rip += cbInstr;
4376
4377 if (rc == VINF_SUCCESS)
4378 {
4379 /* Only resume if successful. */
4380 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub2, y2);
4381 goto ResumeExecution;
4382 }
4383 Assert(rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
4384 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub2, y2);
4385 break;
4386 }
4387
4388 case VMX_EXIT_DRX_MOVE: /* 29 Debug-register accesses. */
4389 {
4390 if ( !DBGFIsStepping(pVCpu)
4391 && !CPUMIsHyperDebugStateActive(pVCpu))
4392 {
4393 /* Disable DRx move intercepts. */
4394 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
4395 rc2 = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
4396 AssertRC(rc2);
4397
4398 /* Save the host and load the guest debug state. */
4399 rc2 = CPUMR0LoadGuestDebugState(pVM, pVCpu, pCtx, true /* include DR6 */);
4400 AssertRC(rc2);
4401
4402#ifdef LOG_ENABLED
4403 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
4404 {
4405 Log(("VMX_EXIT_DRX_MOVE: write DR%d genreg %d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification),
4406 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification)));
4407 }
4408 else
4409 Log(("VMX_EXIT_DRX_MOVE: read DR%d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification)));
4410#endif
4411
4412#ifdef VBOX_WITH_STATISTICS
4413 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
4414 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
4415 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
4416 else
4417 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
4418#endif
4419
4420 goto ResumeExecution;
4421 }
4422
4423 /** @todo clear VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT after the first
4424 * time and restore DRx registers afterwards */
4425 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
4426 {
4427 Log2(("VMX: mov DRx%d, genreg%d\n", VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification),
4428 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification)));
4429 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
4430 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pCtx),
4431 VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification),
4432 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification));
4433 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
4434 Log2(("DR7=%08x\n", pCtx->dr[7]));
4435 }
4436 else
4437 {
4438 Log2(("VMX: mov x, DRx\n"));
4439 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
4440 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pCtx),
4441 VMX_EXIT_QUALIFICATION_DRX_GENREG(exitQualification),
4442 VMX_EXIT_QUALIFICATION_DRX_REGISTER(exitQualification));
4443 }
4444 /* Update EIP if no error occurred. */
4445 if (RT_SUCCESS(rc))
4446 pCtx->rip += cbInstr;
4447
4448 if (rc == VINF_SUCCESS)
4449 {
4450 /* Only resume if successful. */
4451 goto ResumeExecution;
4452 }
4453 Assert(rc == VERR_EM_INTERPRETER);
4454 break;
4455 }
4456
4457 /* Note: We'll get a #GP if the I/O instruction isn't allowed (IOPL or TSS bitmap); no need to double-check. */
4458 case VMX_EXIT_PORT_IO: /* 30 I/O instruction. */
4459 {
4460 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExit2Sub1, y1);
4461 uint32_t uPort;
4462 uint32_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(exitQualification);
4463 bool fIOWrite = (VMX_EXIT_QUALIFICATION_IO_DIRECTION(exitQualification) == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
4464
4465 /** @todo necessary to make the distinction? */
4466 if (VMX_EXIT_QUALIFICATION_IO_ENCODING(exitQualification) == VMX_EXIT_QUALIFICATION_IO_ENCODING_DX)
4467 uPort = pCtx->edx & 0xffff;
4468 else
4469 uPort = VMX_EXIT_QUALIFICATION_IO_PORT(exitQualification); /* Immediate encoding. */
4470
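            /* The I/O width field encodes the access size: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes; a value of 2
               (or anything above 3) is architecturally undefined, hence the paranoia check below. */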
4471 if (RT_UNLIKELY(uIOWidth == 2 || uIOWidth >= 4)) /* paranoia */
4472 {
4473 rc = fIOWrite ? VINF_IOM_R3_IOPORT_WRITE : VINF_IOM_R3_IOPORT_READ;
4474 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1);
4475 break;
4476 }
4477
4478 uint32_t cbSize = g_aIOSize[uIOWidth];
4479 if (VMX_EXIT_QUALIFICATION_IO_STRING(exitQualification))
4480 {
4481 /* ins/outs */
4482 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
4483
4484 /* Disassemble manually to deal with segment prefixes. */
4485 /** @todo VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR contains the flat pointer
4486 * operand of the instruction. */
4487 /** @todo VMX_VMCS32_RO_EXIT_INSTR_INFO also contains segment prefix info. */
4488 rc2 = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
4489 if (RT_SUCCESS(rc2))
4490 {
4491 if (fIOWrite)
4492 {
4493 Log2(("IOMInterpretOUTSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, uPort, cbSize));
4494 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
4495 rc = IOMInterpretOUTSEx(pVM, CPUMCTX2CORE(pCtx), uPort, pDis->fPrefix, (DISCPUMODE)pDis->uAddrMode, cbSize);
4496 }
4497 else
4498 {
4499 Log2(("IOMInterpretINSEx %RGv %x size=%d\n", (RTGCPTR)pCtx->rip, uPort, cbSize));
4500 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
4501 rc = IOMInterpretINSEx(pVM, CPUMCTX2CORE(pCtx), uPort, pDis->fPrefix, (DISCPUMODE)pDis->uAddrMode, cbSize);
4502 }
4503 }
4504 else
4505 rc = VINF_EM_RAW_EMULATE_INSTR;
4506 }
4507 else
4508 {
4509 /* Normal in/out */
4510 uint32_t uAndVal = g_aIOOpAnd[uIOWidth];
4511
4512 Assert(!VMX_EXIT_QUALIFICATION_IO_REP(exitQualification));
4513
4514 if (fIOWrite)
4515 {
4516 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
4517 rc = IOMIOPortWrite(pVM, uPort, pCtx->eax & uAndVal, cbSize);
4518 if (rc == VINF_IOM_R3_IOPORT_WRITE)
4519 HMR0SavePendingIOPortWrite(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize);
4520 }
4521 else
4522 {
4523 uint32_t u32Val = 0;
4524
4525 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
4526 rc = IOMIOPortRead(pVM, uPort, &u32Val, cbSize);
4527 if (IOM_SUCCESS(rc))
4528 {
4529 /* Write back to the EAX register. */
4530 pCtx->eax = (pCtx->eax & ~uAndVal) | (u32Val & uAndVal);
4531 }
4532 else
4533 if (rc == VINF_IOM_R3_IOPORT_READ)
4534 HMR0SavePendingIOPortRead(pVCpu, pCtx->rip, pCtx->rip + cbInstr, uPort, uAndVal, cbSize);
4535 }
4536 }
4537
4538 /*
4539 * Handle the I/O return codes.
4540 * (The unhandled cases end up with rc == VINF_EM_RAW_EMULATE_INSTR.)
4541 */
4542 if (IOM_SUCCESS(rc))
4543 {
4544 /* Update EIP and continue execution. */
4545 pCtx->rip += cbInstr;
4546 if (RT_LIKELY(rc == VINF_SUCCESS))
4547 {
4548 /* If any IO breakpoints are armed, then we should check if a debug trap needs to be generated. */
4549 if (pCtx->dr[7] & X86_DR7_ENABLED_MASK)
4550 {
4551 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
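                        /* Check each of the four breakpoints: it must be enabled (L or G bit set), configured
                           as an I/O breakpoint (R/W field = I/O), and its address range must cover this port. */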
4552 for (unsigned i = 0; i < 4; i++)
4553 {
4554 unsigned uBPLen = g_aIOSize[X86_DR7_GET_LEN(pCtx->dr[7], i)];
4555
4556 if ( (uPort >= pCtx->dr[i] && uPort < pCtx->dr[i] + uBPLen)
4557 && (pCtx->dr[7] & (X86_DR7_L(i) | X86_DR7_G(i)))
4558 && (pCtx->dr[7] & X86_DR7_RW(i, X86_DR7_RW_IO)) == X86_DR7_RW(i, X86_DR7_RW_IO))
4559 {
4560 uint64_t uDR6;
4561
4562 Assert(CPUMIsGuestDebugStateActive(pVCpu));
4563
4564 uDR6 = ASMGetDR6();
4565
4566 /* Clear all breakpoint status flags and set the one we just hit. */
4567 uDR6 &= ~(X86_DR6_B0|X86_DR6_B1|X86_DR6_B2|X86_DR6_B3);
4568 uDR6 |= (uint64_t)RT_BIT(i);
4569
4570 /*
4571 * Note: AMD64 Architecture Programmer's Manual 13.1:
4572 * Bits 15:13 of the DR6 register are never cleared by the processor and must
4573 * be cleared by software after the contents have been read.
4574 */
4575 ASMSetDR6(uDR6);
4576
4577 /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
4578 pCtx->dr[7] &= ~X86_DR7_GD;
4579
4580 /* Paranoia. */
4581 pCtx->dr[7] &= 0xffffffff; /* upper 32 bits reserved */
4582 pCtx->dr[7] &= ~(RT_BIT(11) | RT_BIT(12) | RT_BIT(14) | RT_BIT(15)); /* must be zero */
4583 pCtx->dr[7] |= 0x400; /* must be one */
4584
4585 /* Resync DR7 */
4586 rc2 = VMXWriteVmcs64(VMX_VMCS_GUEST_DR7, pCtx->dr[7]);
4587 AssertRC(rc2);
4588
4589 /* Construct inject info. */
4590 intInfo = X86_XCPT_DB;
4591 intInfo |= (1 << VMX_EXIT_INTERRUPTION_INFO_VALID_SHIFT);
4592 intInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
4593
4594 Log(("Inject IO debug trap at %RGv\n", (RTGCPTR)pCtx->rip));
4595 rc2 = hmR0VmxInjectEvent(pVM, pVCpu, pCtx, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(intInfo),
4596 0 /* cbInstr */, 0 /* errCode */);
4597 AssertRC(rc2);
4598
4599 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1);
4600 goto ResumeExecution;
4601 }
4602 }
4603 }
4604 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1);
4605 goto ResumeExecution;
4606 }
4607 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1);
4608 break;
4609 }
4610
4611#ifdef VBOX_STRICT
4612 if (rc == VINF_IOM_R3_IOPORT_READ)
4613 Assert(!fIOWrite);
4614 else if (rc == VINF_IOM_R3_IOPORT_WRITE)
4615 Assert(fIOWrite);
4616 else
4617 {
4618 AssertMsg( RT_FAILURE(rc)
4619 || rc == VINF_EM_RAW_EMULATE_INSTR
4620 || rc == VINF_EM_RAW_GUEST_TRAP
4621 || rc == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rc)));
4622 }
4623#endif
4624 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2Sub1, y1);
4625 break;
4626 }
4627
4628 case VMX_EXIT_TPR_BELOW_THRESHOLD: /* 43 TPR below threshold. Guest software executed MOV to CR8. */
4629 LogFlow(("VMX_EXIT_TPR_BELOW_THRESHOLD\n"));
4630 /* RIP is already set to the next instruction and the TPR has been synced back. Just resume. */
4631 goto ResumeExecution;
4632
4633 case VMX_EXIT_APIC_ACCESS: /* 44 APIC access. Guest software attempted to access memory at a physical address
4634 on the APIC-access page. */
4635 {
4636 LogFlow(("VMX_EXIT_APIC_ACCESS\n"));
4637 unsigned uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(exitQualification);
4638
4639 switch (uAccessType)
4640 {
4641 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
4642 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
4643 {
4644 RTGCPHYS GCPhys = pCtx->msrApicBase;
4645 GCPhys &= PAGE_BASE_GC_MASK;
4646 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(exitQualification);
4647
4648 LogFlow(("Apic access at %RGp\n", GCPhys));
4649 rc = IOMMMIOPhysHandler(pVM, (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
4650 CPUMCTX2CORE(pCtx), GCPhys);
4651 if (rc == VINF_SUCCESS)
4652 goto ResumeExecution; /* rip already updated */
4653 break;
4654 }
4655
4656 default:
4657 rc = VINF_EM_RAW_EMULATE_INSTR;
4658 break;
4659 }
4660 break;
4661 }
4662
4663 case VMX_EXIT_PREEMPTION_TIMER: /* 52 VMX-preemption timer expired. The preemption timer counted down to zero. */
4664 if (!TMTimerPollBool(pVM, pVCpu))
4665 goto ResumeExecution;
4666 rc = VINF_EM_RAW_TIMER_PENDING;
4667 break;
4668
4669 default:
4670 /* The rest is handled after syncing the entire CPU state. */
4671 break;
4672 }
4673
4674
4675 /*
4676 * Note: The guest state is not entirely synced back at this stage!
4677 */
4678
4679 /* Investigate why there was a VM-exit. (part 2) */
4680 switch (exitReason)
4681 {
4682 case VMX_EXIT_XCPT_NMI: /* 0 Exception or non-maskable interrupt (NMI). */
4683 case VMX_EXIT_EXT_INT: /* 1 External interrupt. */
4684 case VMX_EXIT_EPT_VIOLATION:
4685 case VMX_EXIT_EPT_MISCONFIG: /* 49 EPT misconfig is used by the PGM/MMIO optimizations. */
4686 case VMX_EXIT_PREEMPTION_TIMER: /* 52 VMX-preemption timer expired. The preemption timer counted down to zero. */
4687 /* Already handled above. */
4688 break;
4689
4690 case VMX_EXIT_TRIPLE_FAULT: /* 2 Triple fault. */
4691 rc = VINF_EM_RESET; /* Triple fault equals a reset. */
4692 break;
4693
4694 case VMX_EXIT_INIT_SIGNAL: /* 3 INIT signal. */
4695 case VMX_EXIT_SIPI: /* 4 Start-up IPI (SIPI). */
4696 rc = VINF_EM_RAW_INTERRUPT;
4697 AssertFailed(); /* Can't happen. Yet. */
4698 break;
4699
4700 case VMX_EXIT_IO_SMI: /* 5 I/O system-management interrupt (SMI). */
4701 case VMX_EXIT_SMI: /* 6 Other SMI. */
4702 rc = VINF_EM_RAW_INTERRUPT;
4703 AssertFailed(); /* Can't happen afaik. */
4704 break;
4705
4706 case VMX_EXIT_TASK_SWITCH: /* 9 Task switch: too complicated to emulate, so fall back to the recompiler */
4707 Log(("VMX_EXIT_TASK_SWITCH: exit=%RX64\n", exitQualification));
4708 if ( (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(exitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
4709 && pVCpu->hm.s.Event.fPending)
4710 {
4711 /* Caused by an injected interrupt. */
4712 pVCpu->hm.s.Event.fPending = false;
4713
4714 Log(("VMX_EXIT_TASK_SWITCH: reassert trap %d\n", VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->hm.s.Event.u64IntrInfo)));
4715 Assert(!VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo));
4716 //@todo: Why do we assume this had to be a hardware interrupt? What about software interrupts or exceptions?
4717 rc2 = TRPMAssertTrap(pVCpu, VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVCpu->hm.s.Event.u64IntrInfo), TRPM_HARDWARE_INT);
4718 AssertRC(rc2);
4719 }
4720 /* else Exceptions and software interrupts can just be restarted. */
4721 rc = VERR_EM_INTERPRETER;
4722 break;
4723
4724 case VMX_EXIT_HLT: /* 12 Guest software attempted to execute HLT. */
4725 /* Check if external interrupts are pending; if so, don't switch back. */
4726 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
4727 pCtx->rip++; /* skip hlt */
4728 if (EMShouldContinueAfterHalt(pVCpu, pCtx))
4729 goto ResumeExecution;
4730
4731 rc = VINF_EM_HALT;
4732 break;
4733
4734 case VMX_EXIT_MWAIT: /* 36 Guest software executed MWAIT. */
4735 Log2(("VMX: mwait\n"));
4736 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
4737 rc = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pCtx));
4738 if ( rc == VINF_EM_HALT
4739 || rc == VINF_SUCCESS)
4740 {
4741 /* Update EIP and continue execution. */
4742 pCtx->rip += cbInstr;
4743
4744 /* Check if external interrupts are pending; if so, don't switch back. */
4745 if ( rc == VINF_SUCCESS
4746 || ( rc == VINF_EM_HALT
4747 && EMShouldContinueAfterHalt(pVCpu, pCtx))
4748 )
4749 goto ResumeExecution;
4750 }
4751 AssertMsg(rc == VERR_EM_INTERPRETER || rc == VINF_EM_HALT, ("EMU: mwait failed with %Rrc\n", VBOXSTRICTRC_VAL(rc)));
4752 break;
4753
4754 case VMX_EXIT_RSM: /* 17 Guest software attempted to execute RSM in SMM. */
4755 AssertFailed(); /* can't happen. */
4756 rc = VERR_EM_INTERPRETER;
4757 break;
4758
4759 case VMX_EXIT_MTF: /* 37 Exit due to Monitor Trap Flag. */
4760 LogFlow(("VMX_EXIT_MTF at %RGv\n", (RTGCPTR)pCtx->rip));
4761 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MONITOR_TRAP_FLAG;
4762 rc2 = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
4763 AssertRC(rc2);
4764 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
4765#if 0
4766 DBGFDoneStepping(pVCpu);
4767#endif
4768 rc = VINF_EM_DBG_STOP;
4769 break;
4770
4771 case VMX_EXIT_VMCALL: /* 18 Guest software executed VMCALL. */
4772 case VMX_EXIT_VMCLEAR: /* 19 Guest software executed VMCLEAR. */
4773 case VMX_EXIT_VMLAUNCH: /* 20 Guest software executed VMLAUNCH. */
4774 case VMX_EXIT_VMPTRLD: /* 21 Guest software executed VMPTRLD. */
4775 case VMX_EXIT_VMPTRST: /* 22 Guest software executed VMPTRST. */
4776 case VMX_EXIT_VMREAD: /* 23 Guest software executed VMREAD. */
4777 case VMX_EXIT_VMRESUME: /* 24 Guest software executed VMRESUME. */
4778 case VMX_EXIT_VMWRITE: /* 25 Guest software executed VMWRITE. */
4779 case VMX_EXIT_VMXOFF: /* 26 Guest software executed VMXOFF. */
4780 case VMX_EXIT_VMXON: /* 27 Guest software executed VMXON. */
4781 /** @todo inject #UD immediately */
4782 rc = VERR_EM_INTERPRETER;
4783 break;
4784
4785 case VMX_EXIT_CPUID: /* 10 Guest software attempted to execute CPUID. */
4786 case VMX_EXIT_RDTSC: /* 16 Guest software attempted to execute RDTSC. */
4787 case VMX_EXIT_INVLPG: /* 14 Guest software attempted to execute INVLPG. */
4788 case VMX_EXIT_CRX_MOVE: /* 28 Control-register accesses. */
4789 case VMX_EXIT_DRX_MOVE: /* 29 Debug-register accesses. */
4790 case VMX_EXIT_PORT_IO: /* 30 I/O instruction. */
4791 case VMX_EXIT_RDPMC: /* 15 Guest software attempted to execute RDPMC. */
4792 case VMX_EXIT_RDTSCP: /* 51 Guest software attempted to execute RDTSCP. */
4793 /* already handled above */
4794 AssertMsg( rc == VINF_PGM_CHANGE_MODE
4795 || rc == VINF_EM_RAW_INTERRUPT
4796 || rc == VERR_EM_INTERPRETER
4797 || rc == VINF_EM_RAW_EMULATE_INSTR
4798 || rc == VINF_PGM_SYNC_CR3
4799 || rc == VINF_IOM_R3_IOPORT_READ
4800 || rc == VINF_IOM_R3_IOPORT_WRITE
4801 || rc == VINF_EM_RAW_GUEST_TRAP
4802 || rc == VINF_TRPM_XCPT_DISPATCHED
4803 || rc == VINF_EM_RESCHEDULE_REM,
4804 ("rc = %d\n", VBOXSTRICTRC_VAL(rc)));
4805 break;
4806
4807 case VMX_EXIT_TPR_BELOW_THRESHOLD: /* 43 TPR below threshold. Guest software executed MOV to CR8. */
4808 case VMX_EXIT_RDMSR: /* 31 RDMSR. Guest software attempted to execute RDMSR. */
4809 case VMX_EXIT_WRMSR: /* 32 WRMSR. Guest software attempted to execute WRMSR. */
4810 case VMX_EXIT_PAUSE: /* 40 Guest software attempted to execute PAUSE. */
4811 case VMX_EXIT_MONITOR: /* 39 Guest software attempted to execute MONITOR. */
4812 case VMX_EXIT_APIC_ACCESS: /* 44 APIC access. Guest software attempted to access memory at a physical address
4813 on the APIC-access page. */
4814 {
4815 /*
4816 * If we decided to emulate these instructions here, we would have to sync the MSRs that could have been changed (SYSENTER, FS/GS base).
4817 */
4818 rc = VERR_EM_INTERPRETER;
4819 break;
4820 }
4821
4822 case VMX_EXIT_INT_WINDOW: /* 7 Interrupt window. */
4823 Assert(rc == VINF_EM_RAW_INTERRUPT);
4824 break;
4825
4826 case VMX_EXIT_ERR_INVALID_GUEST_STATE: /* 33 VM-entry failure due to invalid guest state. */
4827 {
4828#ifdef VBOX_STRICT
4829 RTCCUINTREG val2 = 0;
4830
4831 Log(("VMX_EXIT_ERR_INVALID_GUEST_STATE\n"));
4832
4833 VMXReadVmcs(VMX_VMCS_GUEST_RIP, &val2);
4834 Log(("Old eip %RGv new %RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)val2));
4835
4836 VMXReadVmcs(VMX_VMCS_GUEST_CR0, &val2);
4837 Log(("VMX_VMCS_GUEST_CR0 %RX64\n", (uint64_t)val2));
4838
4839 VMXReadVmcs(VMX_VMCS_GUEST_CR3, &val2);
4840 Log(("VMX_VMCS_GUEST_CR3 %RX64\n", (uint64_t)val2));
4841
4842 VMXReadVmcs(VMX_VMCS_GUEST_CR4, &val2);
4843 Log(("VMX_VMCS_GUEST_CR4 %RX64\n", (uint64_t)val2));
4844
4845 VMXReadVmcs(VMX_VMCS_GUEST_RFLAGS, &val2);
4846 Log(("VMX_VMCS_GUEST_RFLAGS %08x\n", val2));
4847
4848 VMX_LOG_SELREG(CS, "CS", val2);
4849 VMX_LOG_SELREG(DS, "DS", val2);
4850 VMX_LOG_SELREG(ES, "ES", val2);
4851 VMX_LOG_SELREG(FS, "FS", val2);
4852 VMX_LOG_SELREG(GS, "GS", val2);
4853 VMX_LOG_SELREG(SS, "SS", val2);
4854 VMX_LOG_SELREG(TR, "TR", val2);
4855 VMX_LOG_SELREG(LDTR, "LDTR", val2);
4856
4857 VMXReadVmcs(VMX_VMCS_GUEST_GDTR_BASE, &val2);
4858 Log(("VMX_VMCS_GUEST_GDTR_BASE %RX64\n", (uint64_t)val2));
4859 VMXReadVmcs(VMX_VMCS_GUEST_IDTR_BASE, &val2);
4860 Log(("VMX_VMCS_GUEST_IDTR_BASE %RX64\n", (uint64_t)val2));
4861#endif /* VBOX_STRICT */
4862 rc = VERR_VMX_INVALID_GUEST_STATE;
4863 break;
4864 }
4865
4866 case VMX_EXIT_ERR_MSR_LOAD: /* 34 VM-entry failure due to MSR loading. */
4867 case VMX_EXIT_ERR_MACHINE_CHECK: /* 41 VM-entry failure due to machine-check. */
4868 default:
4869 rc = VERR_VMX_UNEXPECTED_EXIT_CODE;
4870 AssertMsgFailed(("Unexpected exit code %d\n", exitReason)); /* Can't happen. */
4871 break;
4872
4873 }
4874
4875end:
4876 /* We are now going back to ring-3, so clear the action flag. */
4877 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
4878
4879 /*
4880 * Signal changes for the recompiler.
4881 */
4882 CPUMSetChangedFlags(pVCpu,
4883 CPUM_CHANGED_SYSENTER_MSR
4884 | CPUM_CHANGED_LDTR
4885 | CPUM_CHANGED_GDTR
4886 | CPUM_CHANGED_IDTR
4887 | CPUM_CHANGED_TR
4888 | CPUM_CHANGED_HIDDEN_SEL_REGS);
4889
4890 /*
4891 * If we executed vmlaunch/vmresume and an external IRQ was pending, then we don't have to do a full sync the next time.
4892 */
4893 if ( exitReason == VMX_EXIT_EXT_INT
4894 && !VMX_EXIT_INTERRUPTION_INFO_VALID(intInfo))
4895 {
4896 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
4897 /* On the next entry we'll only sync the host context. */
4898 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
4899 }
4900 else
4901 {
4902 /* On the next entry we'll sync everything. */
4903 /** @todo we can do better than this */
4904 /* Not in the VINF_PGM_CHANGE_MODE though! */
4905 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL;
4906 }
4907
4908 /* Translate into a less severe return code */
4909 if (rc == VERR_EM_INTERPRETER)
4910 rc = VINF_EM_RAW_EMULATE_INSTR;
4911 else if (rc == VERR_VMX_INVALID_VMCS_PTR)
4912 {
4913 /* Try to extract more information about what might have gone wrong here. */
4914 VMXGetActivateVMCS(&pVCpu->hm.s.vmx.lasterror.u64VMCSPhys);
4915 pVCpu->hm.s.vmx.lasterror.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVMCS;
4916 pVCpu->hm.s.vmx.lasterror.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
4917 pVCpu->hm.s.vmx.lasterror.idCurrentCpu = RTMpCpuId();
4918 }
4919
4920 /* Just set the correct state here instead of trying to catch every goto above. */
4921 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED, VMCPUSTATE_STARTED_EXEC);
4922
4923#ifdef VBOX_WITH_VMMR0_DISABLE_PREEMPTION
4924 /* Restore interrupts if we exited after disabling them. */
4925 if (uOldEFlags != ~(RTCCUINTREG)0)
4926 ASMSetFlags(uOldEFlags);
4927#endif
4928
4929 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
4930 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
4931 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
4932 Log2(("X"));
4933 return VBOXSTRICTRC_TODO(rc);
4934}
4935
4936
4937/**
4938 * Enters the VT-x session.
4939 *
4940 * @returns VBox status code.
4941 * @param pVM Pointer to the VM.
4942 * @param pVCpu Pointer to the VMCPU.
4943 * @param pCpu Pointer to the CPU info struct.
4944 */
4945VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBLCPUINFO pCpu)
4946{
4947 Assert(pVM->hm.s.vmx.fSupported);
4948 NOREF(pCpu);
4949
4950 unsigned cr4 = ASMGetCR4();
4951 if (!(cr4 & X86_CR4_VMXE))
4952 {
4953 AssertMsgFailed(("X86_CR4_VMXE should be set!\n"));
4954 return VERR_VMX_X86_CR4_VMXE_CLEARED;
4955 }
4956
4957 /* Activate the VMCS. */
4958 int rc = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
4959 if (RT_FAILURE(rc))
4960 return rc;
4961
4962 pVCpu->hm.s.fResumeVM = false;
4963 return VINF_SUCCESS;
4964}
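/*
 * Illustrative note (not part of the original source): a hedged sketch of how the generic HM
 * ring-0 code is expected to pair these entry points per world switch. The caller shape and the
 * run-loop name below are assumptions for illustration only.
 *
 *     int rc = VMXR0Enter(pVM, pVCpu, pCpu);            // activate this VCPU's VMCS
 *     if (RT_SUCCESS(rc))
 *     {
 *         rc = VMXR0RunGuestCode(pVM, pVCpu, pCtx);     // assumed name of the exit-handling loop above
 *         VMXR0Leave(pVM, pVCpu, pCtx);                 // save guest debug state, VMXClearVMCS()
 *     }
 */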
4965
4966
4967/**
4968 * Leaves the VT-x session.
4969 *
4970 * @returns VBox status code.
4971 * @param pVM Pointer to the VM.
4972 * @param pVCpu Pointer to the VMCPU.
4973 * @param pCtx Pointer to the guest's CPU context.
4974 */
4975VMMR0DECL(int) VMXR0Leave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4976{
4977 Assert(pVM->hm.s.vmx.fSupported);
4978
4979#ifdef DEBUG
4980 if (CPUMIsHyperDebugStateActive(pVCpu))
4981 {
4982 CPUMR0LoadHostDebugState(pVM, pVCpu);
4983 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
4984 }
4985 else
4986#endif
4987
4988 /*
4989 * Save the guest debug state if necessary.
4990 */
4991 if (CPUMIsGuestDebugStateActive(pVCpu))
4992 {
4993 CPUMR0SaveGuestDebugState(pVM, pVCpu, pCtx, true /* save DR6 */);
4994
4995 /* Enable DRx move intercepts again. */
4996 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT;
4997 int rc = VMXWriteVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, pVCpu->hm.s.vmx.u32ProcCtls);
4998 AssertRC(rc);
4999
5000 /* Resync the debug registers the next time. */
5001 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
5002 }
5003 else
5004 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_CONTROLS_MOV_DR_EXIT);
5005
5006 /*
5007 * Clear the VMCS, marking it inactive, clearing implementation-specific data and writing the
5008 * VMCS data back to memory.
5009 */
5010 int rc = VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
5011 AssertRC(rc);
5012
5013 return VINF_SUCCESS;
5014}
5015
5016
5017/**
5018 * Flushes the TLB using EPT.
5019 *
5021 * @param pVM Pointer to the VM.
5022 * @param pVCpu Pointer to the VMCPU.
5023 * @param enmFlush Type of flush.
5024 */
5025static void hmR0VmxFlushEPT(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
5026{
5027 uint64_t descriptor[2];
5028
5029 LogFlow(("hmR0VmxFlushEPT %d\n", enmFlush));
5030 Assert(pVM->hm.s.fNestedPaging);
5031 descriptor[0] = pVCpu->hm.s.vmx.GCPhysEPTP;
5032 descriptor[1] = 0; /* MBZ. Intel spec. 33.3 VMX Instructions */
5033 int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
5034 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %x %RGv failed with %d\n", enmFlush, pVCpu->hm.s.vmx.GCPhysEPTP, rc));
5035#ifdef VBOX_WITH_STATISTICS
5036 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
5037#endif
5038}
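/*
 * Illustrative note (not part of the original source): the 128-bit INVEPT descriptor built above
 * follows the Intel SDM layout:
 *
 *     descriptor[0]  bits  63:0   EPT pointer (EPTP) identifying the context to flush
 *     descriptor[1]  bits 127:64  reserved, must be zero
 *
 * A single-context flush would then be issued roughly as below (the enum value name is assumed
 * from the VMX_FLUSH_EPT type used above):
 *
 *     hmR0VmxFlushEPT(pVM, pVCpu, VMX_FLUSH_EPT_SINGLE_CONTEXT);
 */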
5039
5040
5041/**
5042 * Flushes the TLB using VPID.
5043 *
5045 * @param pVM Pointer to the VM.
5046 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
5047 * enmFlush).
5048 * @param enmFlush Type of flush.
5049 * @param GCPtr Virtual address of the page to flush (can be 0 depending
5050 * on @a enmFlush).
5051 */
5052static void hmR0VmxFlushVPID(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr)
5053{
5054 uint64_t descriptor[2];
5055
5056 Assert(pVM->hm.s.vmx.fVpid);
5057 if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
5058 {
5059 descriptor[0] = 0;
5060 descriptor[1] = 0;
5061 }
5062 else
5063 {
5064 AssertPtr(pVCpu);
5065 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
5066 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
5067 descriptor[0] = pVCpu->hm.s.uCurrentAsid;
5068 descriptor[1] = GCPtr;
5069 }
5070 int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
5071 AssertMsg(rc == VINF_SUCCESS,
5072 ("VMXR0InvVPID %x %x %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
5073#ifdef VBOX_WITH_STATISTICS
5074 if (pVCpu)
5075 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
5076#endif
5077}
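/*
 * Illustrative note (not part of the original source): the 128-bit INVVPID descriptor built above
 * follows the Intel SDM layout:
 *
 *     descriptor[0]  bits  15:0   VPID (the ASID assigned to this VCPU)
 *                    bits  63:16  reserved, must be zero
 *     descriptor[1]  bits 127:64  linear address (only relevant for individual-address flushes)
 *
 * For example, flushing a single guest linear address as VMXR0InvalidatePage() below does:
 *
 *     hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
 */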
5078
5079
5080/**
5081 * Invalidates a guest page by guest virtual address. Only relevant for
5082 * EPT/VPID, otherwise there is nothing really to invalidate.
5083 *
5084 * @returns VBox status code.
5085 * @param pVM Pointer to the VM.
5086 * @param pVCpu Pointer to the VMCPU.
5087 * @param GCVirt Guest virtual address of the page to invalidate.
5088 */
5089VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
5090{
5091 bool fFlushPending = VMCPU_FF_ISSET(pVCpu, VMCPU_FF_TLB_FLUSH);
5092
5093 Log2(("VMXR0InvalidatePage %RGv\n", GCVirt));
5094
5095 if (!fFlushPending)
5096 {
5097 /*
5098 * We must invalidate the guest TLB entry in either case; we cannot ignore it even in the EPT case.
5099 * See @bugref{6043} and @bugref{6177}.
5100 *
5101 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxSetupTLB*() as this
5102 * function may be called in a loop with individual addresses.
5103 */
5104 if (pVM->hm.s.vmx.fVpid)
5105 {
5106 /* If we can flush just this page do it, otherwise flush as little as possible. */
5107 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
5108 hmR0VmxFlushVPID(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
5109 else
5110 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
5111 }
5112 else if (pVM->hm.s.fNestedPaging)
5113 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
5114 }
5115
5116 return VINF_SUCCESS;
5117}
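/*
 * Illustrative note (not part of the original source): a hedged usage sketch. When the guest
 * invalidates a single mapping (e.g. INVLPG emulation), the caller is expected to invoke this
 * once per page; the surrounding caller shown here is an assumption for illustration only.
 *
 *     rc = VMXR0InvalidatePage(pVM, pVCpu, GCPtrPage);  // flushes by VPID when the CPU supports
 *                                                       // individual-address invvpid, otherwise
 *                                                       // sets VMCPU_FF_TLB_FLUSH for the next entry
 */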
5118
5119
5120/**
5121 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
5122 * otherwise there is nothing really to invalidate.
5123 *
5124 * NOTE: Assumes the current instruction references this physical page through a virtual address!
5125 *
5126 * @returns VBox status code.
5127 * @param pVM Pointer to the VM.
5128 * @param pVCpu Pointer to the VMCPU.
5129 * @param GCPhys Guest physical address of the page to invalidate.
5130 */
5131VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
5132{
5133 LogFlow(("VMXR0InvalidatePhysPage %RGp\n", GCPhys));
5134
5135 /*
5136 * We cannot flush a page by guest-physical address: invvpid takes only a linear address,
5137 * while invept flushes an entire EPT context rather than individual addresses. We update the
5138 * force flag here and flush before VM-entry in hmR0VmxSetupTLB*(). This function might be called in a loop.
5139 */
5140 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
5141 return VINF_SUCCESS;
5142}
5143
5144
5145/**
5146 * Reports a world switch error and dumps some useful debug info.
5147 *
5148 * @param pVM Pointer to the VM.
5149 * @param pVCpu Pointer to the VMCPU.
5150 * @param rc Return code.
5151 * @param pCtx Pointer to the current guest CPU context (not updated).
5152 */
5153static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, VBOXSTRICTRC rc, PCPUMCTX pCtx)
5154{
5155 NOREF(pVM);
5156
5157 switch (VBOXSTRICTRC_VAL(rc))
5158 {
5159 case VERR_VMX_INVALID_VMXON_PTR:
5160 AssertFailed();
5161 break;
5162
5163 case VERR_VMX_UNABLE_TO_START_VM:
5164 case VERR_VMX_UNABLE_TO_RESUME_VM:
5165 {
5166 int rc2;
5167 RTCCUINTREG exitReason, instrError;
5168
5169 rc2 = VMXReadVmcs(VMX_VMCS32_RO_EXIT_REASON, &exitReason);
5170 rc2 |= VMXReadVmcs(VMX_VMCS32_RO_VM_INSTR_ERROR, &instrError);
5171 AssertRC(rc2);
5172 if (rc2 == VINF_SUCCESS)
5173 {
5174 Log(("Unable to start/resume VM for reason: %x. Instruction error %x\n", (uint32_t)exitReason,
5175 (uint32_t)instrError));
5176 Log(("Current stack %08x\n", &rc2));
5177
5178 pVCpu->hm.s.vmx.lasterror.u32InstrError = instrError;
5179 pVCpu->hm.s.vmx.lasterror.u32ExitReason = exitReason;
5180
5181#ifdef VBOX_STRICT
5182 RTGDTR gdtr;
5183 PCX86DESCHC pDesc;
5184 RTCCUINTREG val;
5185
5186 ASMGetGDTR(&gdtr);
5187
5188 VMXReadVmcs(VMX_VMCS_GUEST_RIP, &val);
5189 Log(("Old eip %RGv new %RGv\n", (RTGCPTR)pCtx->rip, (RTGCPTR)val));
5190 VMXReadVmcs(VMX_VMCS32_CTRL_PIN_EXEC_CONTROLS, &val);
5191 Log(("VMX_VMCS_CTRL_PIN_EXEC_CONTROLS %08x\n", val));
5192 VMXReadVmcs(VMX_VMCS32_CTRL_PROC_EXEC_CONTROLS, &val);
5193 Log(("VMX_VMCS_CTRL_PROC_EXEC_CONTROLS %08x\n", val));
5194 VMXReadVmcs(VMX_VMCS32_CTRL_ENTRY_CONTROLS, &val);
5195 Log(("VMX_VMCS_CTRL_ENTRY_CONTROLS %08x\n", val));
5196 VMXReadVmcs(VMX_VMCS32_CTRL_EXIT_CONTROLS, &val);
5197 Log(("VMX_VMCS_CTRL_EXIT_CONTROLS %08x\n", val));
5198
5199 VMXReadVmcs(VMX_VMCS_HOST_CR0, &val);
5200 Log(("VMX_VMCS_HOST_CR0 %08x\n", val));
5201 VMXReadVmcs(VMX_VMCS_HOST_CR3, &val);
5202 Log(("VMX_VMCS_HOST_CR3 %08x\n", val));
5203 VMXReadVmcs(VMX_VMCS_HOST_CR4, &val);
5204 Log(("VMX_VMCS_HOST_CR4 %08x\n", val));
5205
5206 VMXReadVmcs(VMX_VMCS16_HOST_FIELD_CS, &val);
5207 Log(("VMX_VMCS_HOST_FIELD_CS %08x\n", val));
5208 VMXReadVmcs(VMX_VMCS_GUEST_RFLAGS, &val);
5209 Log(("VMX_VMCS_GUEST_RFLAGS %08x\n", val));
5210
5211 if (val < gdtr.cbGdt)
5212 {
5213 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
5214 HMR0DumpDescriptor(pDesc, val, "CS: ");
5215 }
5216
5217 VMXReadVmcs(VMX_VMCS16_HOST_FIELD_DS, &val);
5218 Log(("VMX_VMCS_HOST_FIELD_DS %08x\n", val));
5219 if (val < gdtr.cbGdt)
5220 {
5221 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
5222 HMR0DumpDescriptor(pDesc, val, "DS: ");
5223 }
5224
5225 VMXReadVmcs(VMX_VMCS16_HOST_FIELD_ES, &val);
5226 Log(("VMX_VMCS_HOST_FIELD_ES %08x\n", val));
5227 if (val < gdtr.cbGdt)
5228 {
5229 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
5230 HMR0DumpDescriptor(pDesc, val, "ES: ");
5231 }
5232
5233 VMXReadVmcs(VMX_VMCS16_HOST_FIELD_FS, &val);
5234 Log(("VMX_VMCS16_HOST_FIELD_FS %08x\n", val));
5235 if (val < gdtr.cbGdt)
5236 {
5237 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
5238 HMR0DumpDescriptor(pDesc, val, "FS: ");
5239 }
5240
5241 VMXReadVmcs(VMX_VMCS16_HOST_FIELD_GS, &val);
5242 Log(("VMX_VMCS16_HOST_FIELD_GS %08x\n", val));
5243 if (val < gdtr.cbGdt)
5244 {
5245 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
5246 HMR0DumpDescriptor(pDesc, val, "GS: ");
5247 }
5248
5249 VMXReadVmcs(VMX_VMCS16_HOST_FIELD_SS, &val);
5250 Log(("VMX_VMCS16_HOST_FIELD_SS %08x\n", val));
5251 if (val < gdtr.cbGdt)
5252 {
5253 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
5254 HMR0DumpDescriptor(pDesc, val, "SS: ");
5255 }
5256
5257 VMXReadVmcs(VMX_VMCS16_HOST_FIELD_TR, &val);
5258 Log(("VMX_VMCS16_HOST_FIELD_TR %08x\n", val));
5259 if (val < gdtr.cbGdt)
5260 {
5261 pDesc = (PCX86DESCHC)(gdtr.pGdt + (val & X86_SEL_MASK));
5262 HMR0DumpDescriptor(pDesc, val, "TR: ");
5263 }
5264
5265 VMXReadVmcs(VMX_VMCS_HOST_TR_BASE, &val);
5266 Log(("VMX_VMCS_HOST_TR_BASE %RHv\n", val));
5267 VMXReadVmcs(VMX_VMCS_HOST_GDTR_BASE, &val);
5268 Log(("VMX_VMCS_HOST_GDTR_BASE %RHv\n", val));
5269 VMXReadVmcs(VMX_VMCS_HOST_IDTR_BASE, &val);
5270 Log(("VMX_VMCS_HOST_IDTR_BASE %RHv\n", val));
5271 VMXReadVmcs(VMX_VMCS32_HOST_SYSENTER_CS, &val);
5272 Log(("VMX_VMCS_HOST_SYSENTER_CS %08x\n", val));
5273 VMXReadVmcs(VMX_VMCS_HOST_SYSENTER_EIP, &val);
5274 Log(("VMX_VMCS_HOST_SYSENTER_EIP %RHv\n", val));
5275 VMXReadVmcs(VMX_VMCS_HOST_SYSENTER_ESP, &val);
5276 Log(("VMX_VMCS_HOST_SYSENTER_ESP %RHv\n", val));
5277 VMXReadVmcs(VMX_VMCS_HOST_RSP, &val);
5278 Log(("VMX_VMCS_HOST_RSP %RHv\n", val));
5279 VMXReadVmcs(VMX_VMCS_HOST_RIP, &val);
5280 Log(("VMX_VMCS_HOST_RIP %RHv\n", val));
5281# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
5282 if (VMX_IS_64BIT_HOST_MODE())
5283 {
5284 Log(("MSR_K6_EFER = %RX64\n", ASMRdMsr(MSR_K6_EFER)));
5285 Log(("MSR_K6_STAR = %RX64\n", ASMRdMsr(MSR_K6_STAR)));
5286 Log(("MSR_K8_LSTAR = %RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
5287 Log(("MSR_K8_CSTAR = %RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
5288 Log(("MSR_K8_SF_MASK = %RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
5289 Log(("MSR_K8_KERNEL_GS_BASE = %RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
5290 }
5291# endif
5292#endif /* VBOX_STRICT */
5293 }
5294 break;
5295 }
5296
5297 default:
5298 /* impossible */
5299 AssertMsgFailed(("%Rrc (%#x)\n", VBOXSTRICTRC_VAL(rc), VBOXSTRICTRC_VAL(rc)));
5300 break;
5301 }
5302}
5303
5304
5305#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
5306/**
5307 * Prepares for and executes VMLAUNCH (64-bit guest mode).
5308 *
5309 * @returns VBox status code.
5310 * @param fResume Whether to VMLAUNCH or VMRESUME.
5311 * @param pCtx Pointer to the guest CPU context.
5312 * @param pCache Pointer to the VMCS cache.
5313 * @param pVM Pointer to the VM.
5314 * @param pVCpu Pointer to the VMCPU.
5315 */
5316DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
5317{
5318 uint32_t aParam[6];
5319 PHMGLOBLCPUINFO pCpu;
5320 RTHCPHYS HCPhysCpuPage;
5321 int rc;
5322
5323 pCpu = HMR0GetCurrentCpu();
5324 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
5325
5326#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5327 pCache->uPos = 1;
5328 pCache->interPD = PGMGetInterPaeCR3(pVM);
5329 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
5330#endif
5331
5332#ifdef DEBUG
5333 pCache->TestIn.HCPhysCpuPage = 0;
5334 pCache->TestIn.HCPhysVMCS = 0;
5335 pCache->TestIn.pCache = 0;
5336 pCache->TestOut.HCPhysVMCS = 0;
5337 pCache->TestOut.pCache = 0;
5338 pCache->TestOut.pCtx = 0;
5339 pCache->TestOut.eflags = 0;
5340#endif
5341
5342 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
5343 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
5344 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVMCS); /* Param 2: VMCS physical address - Lo. */
5345 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVMCS >> 32); /* Param 2: VMCS physical address - Hi. */
5346 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
5347 aParam[5] = 0;
5348
5349#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5350 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
5351 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
5352#endif
5353 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, pVM->hm.s.pfnVMXGCStartVM64, 6, &aParam[0]);
5354
5355#ifdef VBOX_WITH_CRASHDUMP_MAGIC
5356 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
5357 Assert(pCtx->dr[4] == 10);
5358 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
5359#endif
5360
5361#ifdef DEBUG
5362 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
5363 AssertMsg(pCache->TestIn.HCPhysVMCS == pVCpu->hm.s.vmx.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS,
5364 pVCpu->hm.s.vmx.HCPhysVMCS));
5365 AssertMsg(pCache->TestIn.HCPhysVMCS == pCache->TestOut.HCPhysVMCS, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVMCS,
5366 pCache->TestOut.HCPhysVMCS));
5367 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
5368 pCache->TestOut.pCache));
5369 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
5370 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
5371 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
5372 pCache->TestOut.pCtx));
5373 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
5374#endif
5375 return rc;
5376}
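/*
 * Illustrative note (not part of the original source): the aParam[] packing above splits each
 * 64-bit physical address into two 32-bit halves so the 32-bit host side can pass them through
 * the switcher:
 *
 *     aParam[0] = (uint32_t)HCPhysCpuPage;           // VMXON region address, low 32 bits
 *     aParam[1] = (uint32_t)(HCPhysCpuPage >> 32);   // VMXON region address, high 32 bits
 *
 * The 64-bit handler (pfnVMXGCStartVM64) is then assumed to reassemble each address as
 * ((uint64_t)aParam[1] << 32) | aParam[0]; that reassembly is not part of this file.
 */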
5377
5378
5379# ifdef VBOX_STRICT
5380static bool hmR0VmxIsValidReadField(uint32_t idxField)
5381{
5382 switch (idxField)
5383 {
5384 case VMX_VMCS_GUEST_RIP:
5385 case VMX_VMCS_GUEST_RSP:
5386 case VMX_VMCS_GUEST_RFLAGS:
5387 case VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE:
5388 case VMX_VMCS_CTRL_CR0_READ_SHADOW:
5389 case VMX_VMCS_GUEST_CR0:
5390 case VMX_VMCS_CTRL_CR4_READ_SHADOW:
5391 case VMX_VMCS_GUEST_CR4:
5392 case VMX_VMCS_GUEST_DR7:
5393 case VMX_VMCS32_GUEST_SYSENTER_CS:
5394 case VMX_VMCS_GUEST_SYSENTER_EIP:
5395 case VMX_VMCS_GUEST_SYSENTER_ESP:
5396 case VMX_VMCS32_GUEST_GDTR_LIMIT:
5397 case VMX_VMCS_GUEST_GDTR_BASE:
5398 case VMX_VMCS32_GUEST_IDTR_LIMIT:
5399 case VMX_VMCS_GUEST_IDTR_BASE:
5400 case VMX_VMCS16_GUEST_FIELD_CS:
5401 case VMX_VMCS32_GUEST_CS_LIMIT:
5402 case VMX_VMCS_GUEST_CS_BASE:
5403 case VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS:
5404 case VMX_VMCS16_GUEST_FIELD_DS:
5405 case VMX_VMCS32_GUEST_DS_LIMIT:
5406 case VMX_VMCS_GUEST_DS_BASE:
5407 case VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS:
5408 case VMX_VMCS16_GUEST_FIELD_ES:
5409 case VMX_VMCS32_GUEST_ES_LIMIT:
5410 case VMX_VMCS_GUEST_ES_BASE:
5411 case VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS:
5412 case VMX_VMCS16_GUEST_FIELD_FS:
5413 case VMX_VMCS32_GUEST_FS_LIMIT:
5414 case VMX_VMCS_GUEST_FS_BASE:
5415 case VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS:
5416 case VMX_VMCS16_GUEST_FIELD_GS:
5417 case VMX_VMCS32_GUEST_GS_LIMIT:
5418 case VMX_VMCS_GUEST_GS_BASE:
5419 case VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS:
5420 case VMX_VMCS16_GUEST_FIELD_SS:
5421 case VMX_VMCS32_GUEST_SS_LIMIT:
5422 case VMX_VMCS_GUEST_SS_BASE:
5423 case VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS:
5424 case VMX_VMCS16_GUEST_FIELD_LDTR:
5425 case VMX_VMCS32_GUEST_LDTR_LIMIT:
5426 case VMX_VMCS_GUEST_LDTR_BASE:
5427 case VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS:
5428 case VMX_VMCS16_GUEST_FIELD_TR:
5429 case VMX_VMCS32_GUEST_TR_LIMIT:
5430 case VMX_VMCS_GUEST_TR_BASE:
5431 case VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS:
5432 case VMX_VMCS32_RO_EXIT_REASON:
5433 case VMX_VMCS32_RO_VM_INSTR_ERROR:
5434 case VMX_VMCS32_RO_EXIT_INSTR_LENGTH:
5435 case VMX_VMCS32_RO_EXIT_INTERRUPTION_ERRCODE:
5436 case VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO:
5437 case VMX_VMCS32_RO_EXIT_INSTR_INFO:
5438 case VMX_VMCS_RO_EXIT_QUALIFICATION:
5439 case VMX_VMCS32_RO_IDT_INFO:
5440 case VMX_VMCS32_RO_IDT_ERRCODE:
5441 case VMX_VMCS_GUEST_CR3:
5442 case VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL:
5443 return true;
5444 }
5445 return false;
5446}
5447
5448
5449static bool hmR0VmxIsValidWriteField(uint32_t idxField)
5450{
5451 switch (idxField)
5452 {
5453 case VMX_VMCS_GUEST_LDTR_BASE:
5454 case VMX_VMCS_GUEST_TR_BASE:
5455 case VMX_VMCS_GUEST_GDTR_BASE:
5456 case VMX_VMCS_GUEST_IDTR_BASE:
5457 case VMX_VMCS_GUEST_SYSENTER_EIP:
5458 case VMX_VMCS_GUEST_SYSENTER_ESP:
5459 case VMX_VMCS_GUEST_CR0:
5460 case VMX_VMCS_GUEST_CR4:
5461 case VMX_VMCS_GUEST_CR3:
5462 case VMX_VMCS_GUEST_DR7:
5463 case VMX_VMCS_GUEST_RIP:
5464 case VMX_VMCS_GUEST_RSP:
5465 case VMX_VMCS_GUEST_CS_BASE:
5466 case VMX_VMCS_GUEST_DS_BASE:
5467 case VMX_VMCS_GUEST_ES_BASE:
5468 case VMX_VMCS_GUEST_FS_BASE:
5469 case VMX_VMCS_GUEST_GS_BASE:
5470 case VMX_VMCS_GUEST_SS_BASE:
5471 return true;
5472 }
5473 return false;
5474}
5475# endif /* VBOX_STRICT */
5476
5477
5478/**
5479 * Executes the specified handler in 64-bit mode.
5480 *
5481 * @returns VBox status code.
5482 * @param pVM Pointer to the VM.
5483 * @param pVCpu Pointer to the VMCPU.
5484 * @param pCtx Pointer to the guest CPU context.
5485 * @param pfnHandler Pointer to the RC handler function.
5486 * @param cbParam Number of 32-bit parameters in @a paParam.
5487 * @param paParam Array of 32-bit parameters.
5488 */
5489VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTRCPTR pfnHandler, uint32_t cbParam,
5490 uint32_t *paParam)
5491{
5492 int rc, rc2;
5493 PHMGLOBLCPUINFO pCpu;
5494 RTHCPHYS HCPhysCpuPage;
5495 RTHCUINTREG uOldEFlags;
5496
5497 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
5498 Assert(pfnHandler);
5499 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
5500 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
5501
5502#ifdef VBOX_STRICT
5503 for (unsigned i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
5504 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
5505
5506 for (unsigned i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
5507 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
5508#endif
5509
5510 /* Disable interrupts. */
5511 uOldEFlags = ASMIntDisableFlags();
5512
5513#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
5514 RTCPUID idHostCpu = RTMpCpuId();
5515 CPUMR0SetLApic(pVM, idHostCpu);
5516#endif
5517
5518 pCpu = HMR0GetCurrentCpu();
5519 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
5520
5521 /* Clear the VMCS, marking it inactive, clearing implementation-specific data and writing the VMCS data back to memory. */
5522 VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
5523
5524 /* Leave VMX Root Mode. */
5525 VMXDisable();
5526
5527 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
5528
5529 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
5530 CPUMSetHyperEIP(pVCpu, pfnHandler);
5531 for (int i = (int)cbParam - 1; i >= 0; i--)
5532 CPUMPushHyper(pVCpu, paParam[i]);
5533
5534 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
5535
5536 /* Call switcher. */
5537 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
5538 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
5539
5540 /* Make sure the VMX instructions don't cause #UD faults. */
5541 ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
5542
5543 /* Enter VMX Root Mode */
5544 rc2 = VMXEnable(HCPhysCpuPage);
5545 if (RT_FAILURE(rc2))
5546 {
5547 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
5548 ASMSetFlags(uOldEFlags);
5549 return VERR_VMX_VMXON_FAILED;
5550 }
5551
5552 rc2 = VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVMCS);
5553 AssertRC(rc2);
5554 Assert(!(ASMGetFlags() & X86_EFL_IF));
5555 ASMSetFlags(uOldEFlags);
5556 return rc;
5557}
5558#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
5559
5560
5561#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
5562/**
5563 * Executes VMWRITE.
5564 *
5565 * @returns VBox status code.
5566 * @param pVCpu Pointer to the VMCPU.
5567 * @param idxField VMCS field index.
5568 * @param u64Val 16-, 32- or 64-bit value.
5569 */
5570VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5571{
5572 int rc;
5573 switch (idxField)
5574 {
5575 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
5576 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
5577 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
5578 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
5579 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
5580 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
5581 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
5582 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
5583 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
5584 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
5585 case VMX_VMCS64_GUEST_PDPTE0_FULL:
5586 case VMX_VMCS64_GUEST_PDPTE1_FULL:
5587 case VMX_VMCS64_GUEST_PDPTE2_FULL:
5588 case VMX_VMCS64_GUEST_PDPTE3_FULL:
5589 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
5590 case VMX_VMCS64_GUEST_EFER_FULL:
5591 case VMX_VMCS64_CTRL_EPTP_FULL:
5592 /* These fields consist of two parts, which are both writable in 32-bit mode. */
5593 rc = VMXWriteVmcs32(idxField, u64Val);
5594 rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32ULL));
5595 AssertRC(rc);
5596 return rc;
5597
5598 case VMX_VMCS_GUEST_LDTR_BASE:
5599 case VMX_VMCS_GUEST_TR_BASE:
5600 case VMX_VMCS_GUEST_GDTR_BASE:
5601 case VMX_VMCS_GUEST_IDTR_BASE:
5602 case VMX_VMCS_GUEST_SYSENTER_EIP:
5603 case VMX_VMCS_GUEST_SYSENTER_ESP:
5604 case VMX_VMCS_GUEST_CR0:
5605 case VMX_VMCS_GUEST_CR4:
5606 case VMX_VMCS_GUEST_CR3:
5607 case VMX_VMCS_GUEST_DR7:
5608 case VMX_VMCS_GUEST_RIP:
5609 case VMX_VMCS_GUEST_RSP:
5610 case VMX_VMCS_GUEST_CS_BASE:
5611 case VMX_VMCS_GUEST_DS_BASE:
5612 case VMX_VMCS_GUEST_ES_BASE:
5613 case VMX_VMCS_GUEST_FS_BASE:
5614 case VMX_VMCS_GUEST_GS_BASE:
5615 case VMX_VMCS_GUEST_SS_BASE:
5616 /* Queue a 64-bit value, as we can't set it directly in 32-bit host mode. */
5617 if (u64Val >> 32ULL)
5618 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
5619 else
5620 rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
5621
5622 return rc;
5623
5624 default:
5625 AssertMsgFailed(("Unexpected field %x\n", idxField));
5626 return VERR_INVALID_PARAMETER;
5627 }
5628}
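/*
 * Illustrative note (not part of the original source): for 64-bit "_FULL" VMCS fields, bit 0 of
 * the Intel field encoding is the access type, so the companion "_HIGH" encoding is simply
 * idxField + 1. That is why the first group above can be written as two 32-bit VMWRITEs:
 *
 *     VMXWriteVmcs32(VMX_VMCS64_CTRL_TSC_OFFSET_FULL,     (uint32_t)u64Val);          // low half
 *     VMXWriteVmcs32(VMX_VMCS64_CTRL_TSC_OFFSET_FULL + 1, (uint32_t)(u64Val >> 32));  // high half
 *
 * Natural-width guest fields (RIP, CR3, segment bases, ...) have no "_HIGH" companion, which is
 * why values with bits set above bit 31 are queued via VMXWriteCachedVmcsEx() instead.
 */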
5629
5630
5631/**
5632 * Caches VMCS writes for running 64-bit guests on 32-bit hosts.
5633 *
5634 * @param pVCpu Pointer to the VMCPU.
5635 * @param idxField VMCS field index.
5636 * @param u64Val 16-, 32- or 64-bit value.
5637 */
5638VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
5639{
5640 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
5641
5642 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
5643 ("entries=%x\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
5644
5645 /* Make sure there are no duplicates. */
5646 for (unsigned i = 0; i < pCache->Write.cValidEntries; i++)
5647 {
5648 if (pCache->Write.aField[i] == idxField)
5649 {
5650 pCache->Write.aFieldVal[i] = u64Val;
5651 return VINF_SUCCESS;
5652 }
5653 }
5654
5655 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
5656 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
5657 pCache->Write.cValidEntries++;
5658 return VINF_SUCCESS;
5659}
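/*
 * Illustrative note (not part of the original source): queued entries accumulate in
 * pVCpu->hm.s.vmx.VMCSCache.Write and only become real VMWRITEs once the CPU is in 64-bit mode
 * (the cache address is passed to the switcher as aParam[4] in VMXR0SwitcherStartVM64() above).
 * A hedged sketch of what the flush on the 64-bit side is assumed to look like:
 *
 *     for (unsigned i = 0; i < pCache->Write.cValidEntries; i++)
 *         VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
 *     pCache->Write.cValidEntries = 0;
 *
 * The actual flush code lives in the 64-bit switcher path, not in this file.
 */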
5660
5661#endif /* HC_ARCH_BITS == 32 && !VBOX_WITH_HYBRID_32BIT_KERNEL */
5662