/* $Id: HMVMXR0.cpp 48215 2013-08-31 18:38:37Z vboxsync $ */
/** @file
 * HM VMX (Intel VT-x) - Host Context Ring-0.
 */

/*
 * Copyright (C) 2012-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#define LOG_GROUP LOG_GROUP_HM
#include <iprt/asm-amd64-x86.h>
#include <iprt/thread.h>
#include <iprt/string.h>

#include "HMInternal.h"
#include <VBox/vmm/vm.h>
#include "HMVMXR0.h"
#include <VBox/vmm/pdmapi.h>
#include <VBox/vmm/dbgf.h>
#include <VBox/vmm/iem.h>
#include <VBox/vmm/iom.h>
#include <VBox/vmm/selm.h>
#include <VBox/vmm/tm.h>
#ifdef VBOX_WITH_REM
# include <VBox/vmm/rem.h>
#endif
#ifdef DEBUG_ramshankar
#define HMVMX_SAVE_FULL_GUEST_STATE
#define HMVMX_SYNC_FULL_GUEST_STATE
#define HMVMX_ALWAYS_CHECK_GUEST_STATE
#define HMVMX_ALWAYS_TRAP_ALL_XCPTS
#define HMVMX_ALWAYS_TRAP_PF
#endif


/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#if defined(RT_ARCH_AMD64)
# define HMVMX_IS_64BIT_HOST_MODE()   (true)
typedef RTHCUINTREG HMVMXHCUINTREG;
#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
extern "C" uint32_t g_fVMXIs64bitHost;
# define HMVMX_IS_64BIT_HOST_MODE()   (g_fVMXIs64bitHost != 0)
typedef uint64_t HMVMXHCUINTREG;
#else
# define HMVMX_IS_64BIT_HOST_MODE()   (false)
typedef RTHCUINTREG HMVMXHCUINTREG;
#endif

/** Use the function table. */
#define HMVMX_USE_FUNCTION_TABLE

/** Determine which tagged-TLB flush handler to use. */
#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID           0
#define HMVMX_FLUSH_TAGGED_TLB_EPT                1
#define HMVMX_FLUSH_TAGGED_TLB_VPID               2
#define HMVMX_FLUSH_TAGGED_TLB_NONE               3
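/* These values are selected by hmR0VmxSetupTaggedTlb() (stored in
   pVM->hm.s.vmx.uFlushTaggedTlb) and dispatched on by hmR0VmxFlushTaggedTlb()
   near the end of this section. */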

/** @name Updated-guest-state flags.
 * @{ */
#define HMVMX_UPDATED_GUEST_RIP                   RT_BIT(0)
#define HMVMX_UPDATED_GUEST_RSP                   RT_BIT(1)
#define HMVMX_UPDATED_GUEST_RFLAGS                RT_BIT(2)
#define HMVMX_UPDATED_GUEST_CR0                   RT_BIT(3)
#define HMVMX_UPDATED_GUEST_CR3                   RT_BIT(4)
#define HMVMX_UPDATED_GUEST_CR4                   RT_BIT(5)
#define HMVMX_UPDATED_GUEST_GDTR                  RT_BIT(6)
#define HMVMX_UPDATED_GUEST_IDTR                  RT_BIT(7)
#define HMVMX_UPDATED_GUEST_LDTR                  RT_BIT(8)
#define HMVMX_UPDATED_GUEST_TR                    RT_BIT(9)
#define HMVMX_UPDATED_GUEST_SEGMENT_REGS          RT_BIT(10)
#define HMVMX_UPDATED_GUEST_DEBUG                 RT_BIT(11)
#define HMVMX_UPDATED_GUEST_FS_BASE_MSR           RT_BIT(12)
#define HMVMX_UPDATED_GUEST_GS_BASE_MSR           RT_BIT(13)
#define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR       RT_BIT(14)
#define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR      RT_BIT(15)
#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR      RT_BIT(16)
#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS  RT_BIT(17)
#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE        RT_BIT(18)
#define HMVMX_UPDATED_GUEST_APIC_STATE            RT_BIT(19)
#define HMVMX_UPDATED_GUEST_ALL                   (  HMVMX_UPDATED_GUEST_RIP                  \
                                                   | HMVMX_UPDATED_GUEST_RSP                  \
                                                   | HMVMX_UPDATED_GUEST_RFLAGS               \
                                                   | HMVMX_UPDATED_GUEST_CR0                  \
                                                   | HMVMX_UPDATED_GUEST_CR3                  \
                                                   | HMVMX_UPDATED_GUEST_CR4                  \
                                                   | HMVMX_UPDATED_GUEST_GDTR                 \
                                                   | HMVMX_UPDATED_GUEST_IDTR                 \
                                                   | HMVMX_UPDATED_GUEST_LDTR                 \
                                                   | HMVMX_UPDATED_GUEST_TR                   \
                                                   | HMVMX_UPDATED_GUEST_SEGMENT_REGS         \
                                                   | HMVMX_UPDATED_GUEST_DEBUG                \
                                                   | HMVMX_UPDATED_GUEST_FS_BASE_MSR          \
                                                   | HMVMX_UPDATED_GUEST_GS_BASE_MSR          \
                                                   | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR      \
                                                   | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR     \
                                                   | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR     \
                                                   | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
                                                   | HMVMX_UPDATED_GUEST_ACTIVITY_STATE       \
                                                   | HMVMX_UPDATED_GUEST_APIC_STATE)
/** @} */
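/* The save/load worker functions in this file use these bits to track which
   parts of the guest-CPU state are current: a bit in the VCPU's
   updated-guest-state mask is tested before reading the corresponding VMCS
   fields and set once they have been fetched, so repeated syncs are cheap. */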

/** @name
 * Flags to skip redundant reads of some common VMCS fields that are not part of
 * the guest-CPU state but are in the transient structure.
 * @{
 */
#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO            RT_BIT(0)
#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE      RT_BIT(1)
#define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION            RT_BIT(2)
#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN                RT_BIT(3)
#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO        RT_BIT(4)
#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE  RT_BIT(5)
/** @} */
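/* Each hmR0VmxRead*Vmcs() helper below checks the corresponding bit in
   VMXTRANSIENT::fVmcsFieldsRead before doing a VMREAD and sets it after a
   successful read, so each field is fetched from the VMCS at most once per
   VM-exit. */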

/** @name
 * States of the VMCS.
 *
 * This does not reflect all possible VMCS states but currently only those
 * needed for maintaining the VMCS consistently even when thread-context hooks
 * are used. Maybe later this can be extended (e.g. for nested virtualization).
 * @{
 */
#define HMVMX_VMCS_STATE_CLEAR      RT_BIT(0)
#define HMVMX_VMCS_STATE_ACTIVE     RT_BIT(1)
#define HMVMX_VMCS_STATE_LAUNCHED   RT_BIT(2)
/** @} */
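/* Lifecycle, in Intel's VMCS terminology: VMCLEAR puts a VMCS in the clear
   state, VMPTRLD makes it active and current on a CPU, the first VMLAUNCH
   marks it launched, after which entries must use VMRESUME until the VMCS is
   VMCLEARed again. */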

/**
 * Exception bitmap mask for real-mode guests (real-on-v86).
 *
 * We need to intercept all exceptions manually (except #PF). #NM is also
 * handled separately, see hmR0VmxLoadGuestControlRegs(). #PF need not be
 * intercepted even in real-mode if we have Nested Paging support.
 */
#define HMVMX_REAL_MODE_XCPT_MASK    (  RT_BIT(X86_XCPT_DE)             | RT_BIT(X86_XCPT_DB) | RT_BIT(X86_XCPT_NMI) \
                                      | RT_BIT(X86_XCPT_BP)             | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR)  \
                                      | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */        | RT_BIT(X86_XCPT_DF)  \
                                      | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP)  \
                                      | RT_BIT(X86_XCPT_SS)             | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
                                      | RT_BIT(X86_XCPT_MF)             | RT_BIT(X86_XCPT_AC) | RT_BIT(X86_XCPT_MC)  \
                                      | RT_BIT(X86_XCPT_XF))

/**
 * Exception bitmap mask for all contributory exceptions.
 *
 * Page fault is deliberately excluded here as it's conditional as to whether
 * it's contributory or benign. Page faults are handled separately.
 */
#define HMVMX_CONTRIBUTORY_XCPT_MASK (  RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
                                      | RT_BIT(X86_XCPT_DE))

/** Maximum VM-instruction error number. */
#define HMVMX_INSTR_ERROR_MAX        28

/** Profiling macro. */
#ifdef HM_PROFILE_EXIT_DISPATCH
# define HMVMX_START_EXIT_DISPATCH_PROF()  STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
# define HMVMX_STOP_EXIT_DISPATCH_PROF()   STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
#else
# define HMVMX_START_EXIT_DISPATCH_PROF()  do { } while (0)
# define HMVMX_STOP_EXIT_DISPATCH_PROF()   do { } while (0)
#endif

/** Assert that preemption is disabled or covered by thread-context hooks. */
#define HMVMX_ASSERT_PREEMPT_SAFE()  Assert(   VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
                                            || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));

/** Assert that we haven't migrated CPUs when thread-context hooks are not
 *  used. */
#define HMVMX_ASSERT_CPU_SAFE()      AssertMsg(   VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
                                               || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
                                               ("Illegal migration! Entered on CPU %u Current %u\n", \
                                                pVCpu->hm.s.idEnteredCpu, RTMpCpuId()))

/*******************************************************************************
*   Structures and Typedefs                                                    *
*******************************************************************************/
/**
 * VMX transient state.
 *
 * A state structure for holding miscellaneous information across
 * VMX non-root operation, to be restored after the transition.
 */
typedef struct VMXTRANSIENT
{
    /** The host's rflags/eflags. */
    RTCCUINTREG     uEflags;
#if HC_ARCH_BITS == 32
    uint32_t        u32Alignment0;
#endif
    /** The guest's LSTAR MSR value used for TPR patching for 32-bit guests. */
    uint64_t        u64LStarMsr;
    /** The guest's TPR value used for TPR shadowing. */
    uint8_t         u8GuestTpr;
    /** Alignment. */
    uint8_t         abAlignment0[7];

    /** The basic VM-exit reason. */
    uint16_t        uExitReason;
    /** Alignment. */
    uint16_t        u16Alignment0;
    /** The VM-exit interruption error code. */
    uint32_t        uExitIntrErrorCode;
    /** The VM-exit exit qualification. */
    uint64_t        uExitQualification;

    /** The VM-exit interruption-information field. */
    uint32_t        uExitIntrInfo;
    /** The VM-exit instruction-length field. */
    uint32_t        cbInstr;
    /** The VM-exit instruction-information field. */
    union
    {
        /** Plain unsigned int representation. */
        uint32_t    u;
        /** INS and OUTS information. */
        struct
        {
            uint32_t    u6Reserved0 : 6;
            /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
            uint32_t    u3AddrSize  : 3;
            uint32_t    u5Reserved1 : 5;
            /** The segment register (X86_SREG_XXX). */
            uint32_t    iSegReg     : 3;
            uint32_t    uReserved2  : 14;
        } StrIo;
    } ExitInstrInfo;
    /** Whether the VM-entry failed or not. */
    bool            fVMEntryFailed;
    /** Alignment. */
    uint8_t         abAlignment1[3];

    /** The VM-entry interruption-information field. */
    uint32_t        uEntryIntrInfo;
    /** The VM-entry exception error code field. */
    uint32_t        uEntryXcptErrorCode;
    /** The VM-entry instruction length field. */
    uint32_t        cbEntryInstr;

    /** IDT-vectoring information field. */
    uint32_t        uIdtVectoringInfo;
    /** IDT-vectoring error code. */
    uint32_t        uIdtVectoringErrorCode;

    /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
    uint32_t        fVmcsFieldsRead;
    /** Whether TSC-offsetting should be setup before VM-entry. */
    bool            fUpdateTscOffsettingAndPreemptTimer;
    /** Whether the VM-exit was caused by a page-fault during delivery of a
     *  contributory exception or a page-fault. */
    bool            fVectoringPF;
} VMXTRANSIENT;
AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason,    sizeof(uint64_t));
AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntrInfo,  sizeof(uint64_t));
AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntrInfo, sizeof(uint64_t));
AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
/** Pointer to VMX transient state. */
typedef VMXTRANSIENT *PVMXTRANSIENT;


/**
 * MSR-bitmap read permissions.
 */
typedef enum VMXMSREXITREAD
{
    /** Reading this MSR causes a VM-exit. */
    VMXMSREXIT_INTERCEPT_READ = 0xb,
    /** Reading this MSR does not cause a VM-exit. */
    VMXMSREXIT_PASSTHRU_READ
} VMXMSREXITREAD;

/**
 * MSR-bitmap write permissions.
 */
typedef enum VMXMSREXITWRITE
{
    /** Writing to this MSR causes a VM-exit. */
    VMXMSREXIT_INTERCEPT_WRITE = 0xd,
    /** Writing to this MSR does not cause a VM-exit. */
    VMXMSREXIT_PASSTHRU_WRITE
} VMXMSREXITWRITE;
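/* These two enums are consumed by hmR0VmxSetMsrPermission() below when
   editing the MSR bitmap referenced by the VMCS. */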

/**
 * VM-exit handler.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pMixedCtx       Pointer to the guest-CPU context. The data may be
 *                          out-of-sync. Make sure to update the required
 *                          fields before using them.
 * @param   pVmxTransient   Pointer to the VMX-transient structure.
 */
#ifndef HMVMX_USE_FUNCTION_TABLE
typedef int FNVMEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
#else
typedef DECLCALLBACK(int) FNVMEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
/** Pointer to VM-exit handler. */
typedef FNVMEXITHANDLER *PFNVMEXITHANDLER;
#endif
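/* With HMVMX_USE_FUNCTION_TABLE defined (see above), VM-exits are dispatched
   through the g_apfnVMExitHandlers table below; without it, hmR0VmxHandleExit()
   selects the handler in a switch statement instead. */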


/*******************************************************************************
*   Internal Functions                                                         *
*******************************************************************************/
static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush);
static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
static int  hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
                                   uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState);
#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
static int  hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
#endif
#ifndef HMVMX_USE_FUNCTION_TABLE
DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
# define HMVMX_EXIT_DECL static int
#else
# define HMVMX_EXIT_DECL static DECLCALLBACK(int)
#endif

/** @name VM-exit handlers.
 * @{
 */
static FNVMEXITHANDLER hmR0VmxExitXcptOrNmi;
static FNVMEXITHANDLER hmR0VmxExitExtInt;
static FNVMEXITHANDLER hmR0VmxExitTripleFault;
static FNVMEXITHANDLER hmR0VmxExitInitSignal;
static FNVMEXITHANDLER hmR0VmxExitSipi;
static FNVMEXITHANDLER hmR0VmxExitIoSmi;
static FNVMEXITHANDLER hmR0VmxExitSmi;
static FNVMEXITHANDLER hmR0VmxExitIntWindow;
static FNVMEXITHANDLER hmR0VmxExitNmiWindow;
static FNVMEXITHANDLER hmR0VmxExitTaskSwitch;
static FNVMEXITHANDLER hmR0VmxExitCpuid;
static FNVMEXITHANDLER hmR0VmxExitGetsec;
static FNVMEXITHANDLER hmR0VmxExitHlt;
static FNVMEXITHANDLER hmR0VmxExitInvd;
static FNVMEXITHANDLER hmR0VmxExitInvlpg;
static FNVMEXITHANDLER hmR0VmxExitRdpmc;
static FNVMEXITHANDLER hmR0VmxExitRdtsc;
static FNVMEXITHANDLER hmR0VmxExitRsm;
static FNVMEXITHANDLER hmR0VmxExitSetPendingXcptUD;
static FNVMEXITHANDLER hmR0VmxExitMovCRx;
static FNVMEXITHANDLER hmR0VmxExitMovDRx;
static FNVMEXITHANDLER hmR0VmxExitIoInstr;
static FNVMEXITHANDLER hmR0VmxExitRdmsr;
static FNVMEXITHANDLER hmR0VmxExitWrmsr;
static FNVMEXITHANDLER hmR0VmxExitErrInvalidGuestState;
static FNVMEXITHANDLER hmR0VmxExitErrMsrLoad;
static FNVMEXITHANDLER hmR0VmxExitErrUndefined;
static FNVMEXITHANDLER hmR0VmxExitMwait;
static FNVMEXITHANDLER hmR0VmxExitMtf;
static FNVMEXITHANDLER hmR0VmxExitMonitor;
static FNVMEXITHANDLER hmR0VmxExitPause;
static FNVMEXITHANDLER hmR0VmxExitErrMachineCheck;
static FNVMEXITHANDLER hmR0VmxExitTprBelowThreshold;
static FNVMEXITHANDLER hmR0VmxExitApicAccess;
static FNVMEXITHANDLER hmR0VmxExitXdtrAccess;
static FNVMEXITHANDLER hmR0VmxExitEptViolation;
static FNVMEXITHANDLER hmR0VmxExitEptMisconfig;
static FNVMEXITHANDLER hmR0VmxExitRdtscp;
static FNVMEXITHANDLER hmR0VmxExitPreemptTimer;
static FNVMEXITHANDLER hmR0VmxExitWbinvd;
static FNVMEXITHANDLER hmR0VmxExitXsetbv;
static FNVMEXITHANDLER hmR0VmxExitRdrand;
static FNVMEXITHANDLER hmR0VmxExitInvpcid;
/** @} */

static int      hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int      hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int      hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int      hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int      hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int      hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static int      hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);

/*******************************************************************************
*   Global Variables                                                           *
*******************************************************************************/
#ifdef HMVMX_USE_FUNCTION_TABLE

/**
 * VMX_EXIT dispatch table.
 */
static const PFNVMEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
{
 /* 00  VMX_EXIT_XCPT_OR_NMI             */  hmR0VmxExitXcptOrNmi,
 /* 01  VMX_EXIT_EXT_INT                 */  hmR0VmxExitExtInt,
 /* 02  VMX_EXIT_TRIPLE_FAULT            */  hmR0VmxExitTripleFault,
 /* 03  VMX_EXIT_INIT_SIGNAL             */  hmR0VmxExitInitSignal,
 /* 04  VMX_EXIT_SIPI                    */  hmR0VmxExitSipi,
 /* 05  VMX_EXIT_IO_SMI                  */  hmR0VmxExitIoSmi,
 /* 06  VMX_EXIT_SMI                     */  hmR0VmxExitSmi,
 /* 07  VMX_EXIT_INT_WINDOW              */  hmR0VmxExitIntWindow,
 /* 08  VMX_EXIT_NMI_WINDOW              */  hmR0VmxExitNmiWindow,
 /* 09  VMX_EXIT_TASK_SWITCH             */  hmR0VmxExitTaskSwitch,
 /* 10  VMX_EXIT_CPUID                   */  hmR0VmxExitCpuid,
 /* 11  VMX_EXIT_GETSEC                  */  hmR0VmxExitGetsec,
 /* 12  VMX_EXIT_HLT                     */  hmR0VmxExitHlt,
 /* 13  VMX_EXIT_INVD                    */  hmR0VmxExitInvd,
 /* 14  VMX_EXIT_INVLPG                  */  hmR0VmxExitInvlpg,
 /* 15  VMX_EXIT_RDPMC                   */  hmR0VmxExitRdpmc,
 /* 16  VMX_EXIT_RDTSC                   */  hmR0VmxExitRdtsc,
 /* 17  VMX_EXIT_RSM                     */  hmR0VmxExitRsm,
 /* 18  VMX_EXIT_VMCALL                  */  hmR0VmxExitSetPendingXcptUD,
 /* 19  VMX_EXIT_VMCLEAR                 */  hmR0VmxExitSetPendingXcptUD,
 /* 20  VMX_EXIT_VMLAUNCH                */  hmR0VmxExitSetPendingXcptUD,
 /* 21  VMX_EXIT_VMPTRLD                 */  hmR0VmxExitSetPendingXcptUD,
 /* 22  VMX_EXIT_VMPTRST                 */  hmR0VmxExitSetPendingXcptUD,
 /* 23  VMX_EXIT_VMREAD                  */  hmR0VmxExitSetPendingXcptUD,
 /* 24  VMX_EXIT_VMRESUME                */  hmR0VmxExitSetPendingXcptUD,
 /* 25  VMX_EXIT_VMWRITE                 */  hmR0VmxExitSetPendingXcptUD,
 /* 26  VMX_EXIT_VMXOFF                  */  hmR0VmxExitSetPendingXcptUD,
 /* 27  VMX_EXIT_VMXON                   */  hmR0VmxExitSetPendingXcptUD,
 /* 28  VMX_EXIT_MOV_CRX                 */  hmR0VmxExitMovCRx,
 /* 29  VMX_EXIT_MOV_DRX                 */  hmR0VmxExitMovDRx,
 /* 30  VMX_EXIT_IO_INSTR                */  hmR0VmxExitIoInstr,
 /* 31  VMX_EXIT_RDMSR                   */  hmR0VmxExitRdmsr,
 /* 32  VMX_EXIT_WRMSR                   */  hmR0VmxExitWrmsr,
 /* 33  VMX_EXIT_ERR_INVALID_GUEST_STATE */  hmR0VmxExitErrInvalidGuestState,
 /* 34  VMX_EXIT_ERR_MSR_LOAD            */  hmR0VmxExitErrMsrLoad,
 /* 35  UNDEFINED                        */  hmR0VmxExitErrUndefined,
 /* 36  VMX_EXIT_MWAIT                   */  hmR0VmxExitMwait,
 /* 37  VMX_EXIT_MTF                     */  hmR0VmxExitMtf,
 /* 38  UNDEFINED                        */  hmR0VmxExitErrUndefined,
 /* 39  VMX_EXIT_MONITOR                 */  hmR0VmxExitMonitor,
 /* 40  VMX_EXIT_PAUSE                   */  hmR0VmxExitPause,
 /* 41  VMX_EXIT_ERR_MACHINE_CHECK       */  hmR0VmxExitErrMachineCheck,
 /* 42  UNDEFINED                        */  hmR0VmxExitErrUndefined,
 /* 43  VMX_EXIT_TPR_BELOW_THRESHOLD     */  hmR0VmxExitTprBelowThreshold,
 /* 44  VMX_EXIT_APIC_ACCESS             */  hmR0VmxExitApicAccess,
 /* 45  UNDEFINED                        */  hmR0VmxExitErrUndefined,
 /* 46  VMX_EXIT_XDTR_ACCESS             */  hmR0VmxExitXdtrAccess,
 /* 47  VMX_EXIT_TR_ACCESS               */  hmR0VmxExitXdtrAccess,
 /* 48  VMX_EXIT_EPT_VIOLATION           */  hmR0VmxExitEptViolation,
 /* 49  VMX_EXIT_EPT_MISCONFIG           */  hmR0VmxExitEptMisconfig,
 /* 50  VMX_EXIT_INVEPT                  */  hmR0VmxExitSetPendingXcptUD,
 /* 51  VMX_EXIT_RDTSCP                  */  hmR0VmxExitRdtscp,
 /* 52  VMX_EXIT_PREEMPT_TIMER           */  hmR0VmxExitPreemptTimer,
 /* 53  VMX_EXIT_INVVPID                 */  hmR0VmxExitSetPendingXcptUD,
 /* 54  VMX_EXIT_WBINVD                  */  hmR0VmxExitWbinvd,
 /* 55  VMX_EXIT_XSETBV                  */  hmR0VmxExitXsetbv,
 /* 56  UNDEFINED                        */  hmR0VmxExitErrUndefined,
 /* 57  VMX_EXIT_RDRAND                  */  hmR0VmxExitRdrand,
 /* 58  VMX_EXIT_INVPCID                 */  hmR0VmxExitInvpcid,
 /* 59  VMX_EXIT_VMFUNC                  */  hmR0VmxExitSetPendingXcptUD
};
#endif /* HMVMX_USE_FUNCTION_TABLE */
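/* The table is indexed by the basic exit reason (VMXTRANSIENT::uExitReason);
   VMXR0GlobalInit() below asserts, in strict builds, that every entry is
   populated. */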

#ifdef VBOX_STRICT
static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
{
    /*  0 */ "(Not Used)",
    /*  1 */ "VMCALL executed in VMX root operation.",
    /*  2 */ "VMCLEAR with invalid physical address.",
    /*  3 */ "VMCLEAR with VMXON pointer.",
    /*  4 */ "VMLAUNCH with non-clear VMCS.",
    /*  5 */ "VMRESUME with non-launched VMCS.",
    /*  6 */ "VMRESUME after VMXOFF.",
    /*  7 */ "VM entry with invalid control fields.",
    /*  8 */ "VM entry with invalid host state fields.",
    /*  9 */ "VMPTRLD with invalid physical address.",
    /* 10 */ "VMPTRLD with VMXON pointer.",
    /* 11 */ "VMPTRLD with incorrect revision identifier.",
    /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
    /* 13 */ "VMWRITE to read-only VMCS component.",
    /* 14 */ "(Not Used)",
    /* 15 */ "VMXON executed in VMX root operation.",
    /* 16 */ "VM entry with invalid executive-VMCS pointer.",
    /* 17 */ "VM entry with non-launched executing VMCS.",
    /* 18 */ "VM entry with executive-VMCS pointer not VMXON pointer.",
    /* 19 */ "VMCALL with non-clear VMCS.",
    /* 20 */ "VMCALL with invalid VM-exit control fields.",
    /* 21 */ "(Not Used)",
    /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
    /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
    /* 24 */ "VMCALL with invalid SMM-monitor features.",
    /* 25 */ "VM entry with invalid VM-execution control fields in executive VMCS.",
    /* 26 */ "VM entry with events blocked by MOV SS.",
    /* 27 */ "(Not Used)",
    /* 28 */ "Invalid operand to INVEPT/INVVPID."
};
#endif /* VBOX_STRICT */


/**
 * Updates the VM's last error record. If there was a VMX instruction error,
 * reads the error data from the VMCS and updates the VCPU's last error record
 * as well.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU (can be NULL if @a rc is not
 *                  VERR_VMX_UNABLE_TO_START_VM or
 *                  VERR_VMX_INVALID_VMCS_FIELD).
 * @param   rc      The error code.
 */
static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
{
    AssertPtr(pVM);
    if (   rc == VERR_VMX_INVALID_VMCS_FIELD
        || rc == VERR_VMX_UNABLE_TO_START_VM)
    {
        AssertPtrReturnVoid(pVCpu);
        VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
    }
    pVM->hm.s.lLastError = rc;
}

/**
 * Reads the VM-entry interruption-information field from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 *
 * @remarks No-long-jump zone!!!
 */
DECLINLINE(int) hmR0VmxReadEntryIntrInfoVmcs(PVMXTRANSIENT pVmxTransient)
{
    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntrInfo);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}


/**
 * Reads the VM-entry exception error code field from the VMCS into
 * the VMX transient structure.
 *
 * @returns VBox status code.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 *
 * @remarks No-long-jump zone!!!
 */
DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
{
    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}

/**
 * Reads the VM-entry instruction length field from the VMCS into
 * the VMX transient structure.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 *
 * @remarks No-long-jump zone!!!
 */
DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}

/**
 * Reads the VM-exit interruption-information field from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 */
DECLINLINE(int) hmR0VmxReadExitIntrInfoVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
    {
        int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntrInfo);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
    }
    return VINF_SUCCESS;
}


/**
 * Reads the VM-exit interruption error code from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 */
DECLINLINE(int) hmR0VmxReadExitIntrErrorCodeVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
    {
        int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntrErrorCode);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
    }
    return VINF_SUCCESS;
}


/**
 * Reads the VM-exit instruction length field from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 */
DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
    {
        int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
    }
    return VINF_SUCCESS;
}

/**
 * Reads the VM-exit instruction-information field from the VMCS into
 * the VMX transient structure.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 */
DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    /* No HMVMX_UPDATED_TRANSIENT_* bit is defined for this field, so read it
       unconditionally rather than misusing the instruction-length flag, and
       store it in ExitInstrInfo (not cbInstr, which holds the length). */
    int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
    AssertRCReturn(rc, rc);
    return VINF_SUCCESS;
}


/**
 * Reads the exit qualification from the VMCS into the VMX transient structure.
 *
 * @returns VBox status code.
 * @param   pVCpu           Pointer to the VMCPU.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 */
DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
    {
        int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
    }
    return VINF_SUCCESS;
}


/**
 * Reads the IDT-vectoring information field from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 *
 * @remarks No-long-jump zone!!!
 */
DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
    {
        int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
    }
    return VINF_SUCCESS;
}


/**
 * Reads the IDT-vectoring error code from the VMCS into the VMX
 * transient structure.
 *
 * @returns VBox status code.
 * @param   pVmxTransient   Pointer to the VMX transient structure.
 */
DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
{
    if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
    {
        int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
        AssertRCReturn(rc, rc);
        pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
    }
    return VINF_SUCCESS;
}

/**
 * Enters VMX root mode operation on the current CPU.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM (optional, can be NULL after
 *                          a resume).
 * @param   HCPhysCpuPage   Physical address of the VMXON region.
 * @param   pvCpuPage       Pointer to the VMXON region.
 */
static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
{
    AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
    AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    if (pVM)
    {
        /* Write the VMCS revision dword to the VMXON region. */
        *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.u64BasicInfo);
    }

    /* Enable the VMX bit in CR4 if necessary. */
    RTCCUINTREG uCr4 = ASMGetCR4();
    if (!(uCr4 & X86_CR4_VMXE))
        ASMSetCR4(uCr4 | X86_CR4_VMXE);

    /* Enter VMX root mode. */
    int rc = VMXEnable(HCPhysCpuPage);
    if (RT_FAILURE(rc))
        ASMSetCR4(uCr4);

    return rc;
}


/**
 * Exits VMX root mode operation on the current CPU.
 *
 * @returns VBox status code.
 */
static int hmR0VmxLeaveRootMode(void)
{
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    /* If we're for some reason not in VMX root mode, then don't leave it. */
    RTCCUINTREG uHostCR4 = ASMGetCR4();
    if (uHostCR4 & X86_CR4_VMXE)
    {
        /* Exit VMX root mode and clear the VMX bit in CR4. */
        VMXDisable();
        ASMSetCR4(uHostCR4 & ~X86_CR4_VMXE);
        return VINF_SUCCESS;
    }

    return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
}

/**
 * Allocates and maps one physically contiguous page. The allocated page is
 * zeroed out (used by various VT-x structures).
 *
 * @returns IPRT status code.
 * @param   pMemObj     Pointer to the ring-0 memory object.
 * @param   ppVirt      Where to store the virtual address of the
 *                      allocation.
 * @param   pHCPhys     Where to store the physical address of the
 *                      allocation.
 */
DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
{
    AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
    AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);

    int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
    if (RT_FAILURE(rc))
        return rc;
    *ppVirt  = RTR0MemObjAddress(*pMemObj);
    *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
    ASMMemZero32(*ppVirt, PAGE_SIZE);
    return VINF_SUCCESS;
}
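/* hmR0VmxStructsAlloc() below relies on this helper for the VMCS, MSR-bitmap,
   virtual-APIC and (with VBOX_WITH_AUTO_MSR_LOAD_RESTORE) auto-load/store MSR
   pages. */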

/**
 * Frees and unmaps an allocated physical page.
 *
 * @param   pMemObj     Pointer to the ring-0 memory object.
 * @param   ppVirt      Where to re-initialize the virtual address of the
 *                      allocation as 0.
 * @param   pHCPhys     Where to re-initialize the physical address of the
 *                      allocation as 0.
 */
DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
{
    AssertPtr(pMemObj);
    AssertPtr(ppVirt);
    AssertPtr(pHCPhys);
    if (*pMemObj != NIL_RTR0MEMOBJ)
    {
        int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
        AssertRC(rc);
        *pMemObj = NIL_RTR0MEMOBJ;
        *ppVirt  = 0;
        *pHCPhys = 0;
    }
}


/**
 * Worker function to free VT-x related structures.
 *
 * @param   pVM     Pointer to the VM.
 */
static void hmR0VmxStructsFree(PVM pVM)
{
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        AssertPtr(pVCpu);

#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
        hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
#endif

        if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
            hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);

        hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
        hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
    }

    hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
#endif
}


/**
 * Worker function to allocate VT-x related VM structures.
 *
 * @returns IPRT status code.
 * @param   pVM     Pointer to the VM.
 */
static int hmR0VmxStructsAlloc(PVM pVM)
{
    /*
     * Initialize members up-front so we can cleanup properly on allocation failure.
     */
#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix)       \
    pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ;         \
    pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0;                 \
    pVM->hm.s.vmx.HCPhys##a_Name = 0;

#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix)    \
    pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ;       \
    pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0;               \
    pVCpu->hm.s.vmx.HCPhys##a_Name = 0;

#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
#endif
    VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);

    AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
        VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
        VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
        VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
#endif
    }
#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
#undef VMXLOCAL_INIT_VM_MEMOBJ

    /*
     * Allocate all the VT-x structures.
     */
    int rc = VINF_SUCCESS;
#ifdef VBOX_WITH_CRASHDUMP_MAGIC
    rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
    if (RT_FAILURE(rc))
        goto cleanup;
    strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
    *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
#endif

    /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
    if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
    {
        rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
                               &pVM->hm.s.vmx.HCPhysApicAccess);
        if (RT_FAILURE(rc))
            goto cleanup;
    }

    /*
     * Initialize per-VCPU VT-x structures.
     */
    for (VMCPUID i = 0; i < pVM->cCpus; i++)
    {
        PVMCPU pVCpu = &pVM->aCpus[i];
        AssertPtr(pVCpu);

        /* Allocate the VM control structure (VMCS). */
        AssertReturn(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.u64BasicInfo) <= PAGE_SIZE, VERR_INTERNAL_ERROR);
        rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
        if (RT_FAILURE(rc))
            goto cleanup;

        /* Allocate the Virtual-APIC page for transparent TPR accesses. */
        if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
        {
            rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
                                   &pVCpu->hm.s.vmx.HCPhysVirtApic);
            if (RT_FAILURE(rc))
                goto cleanup;
        }

        /* Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for transparent accesses of specific MSRs. */
        if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
        {
            rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
                                   &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
            if (RT_FAILURE(rc))
                goto cleanup;
            memset(pVCpu->hm.s.vmx.pvMsrBitmap, 0xff, PAGE_SIZE);
        }

#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
        /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
        rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
        if (RT_FAILURE(rc))
            goto cleanup;

        /* Allocate the VM-exit MSR-load page for the host MSRs. */
        rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
        if (RT_FAILURE(rc))
            goto cleanup;
#endif
    }

    return VINF_SUCCESS;

cleanup:
    hmR0VmxStructsFree(pVM);
    return rc;
}


/**
 * Does global VT-x initialization (called during module initialization).
 *
 * @returns VBox status code.
 */
VMMR0DECL(int) VMXR0GlobalInit(void)
{
#ifdef HMVMX_USE_FUNCTION_TABLE
    AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
# ifdef VBOX_STRICT
    for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
        Assert(g_apfnVMExitHandlers[i]);
# endif
#endif
    return VINF_SUCCESS;
}


/**
 * Does global VT-x termination (called during module termination).
 */
VMMR0DECL(void) VMXR0GlobalTerm()
{
    /* Nothing to do currently. */
}


/**
 * Sets up and activates VT-x on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            Pointer to the global CPU info struct.
 * @param   pVM             Pointer to the VM (can be NULL after a host resume
 *                          operation).
 * @param   pvCpuPage       Pointer to the VMXON region (can be NULL if @a
 *                          fEnabledByHost is true).
 * @param   HCPhysCpuPage   Physical address of the VMXON region (can be 0 if
 *                          @a fEnabledByHost is true).
 * @param   fEnabledByHost  Set if SUPR0EnableVTx() or similar was used to
 *                          enable VT-x on the host.
 */
VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
{
    AssertReturn(pCpu, VERR_INVALID_PARAMETER);
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    if (!fEnabledByHost)
    {
        int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
        if (RT_FAILURE(rc))
            return rc;
    }

    /*
     * Flush all EPTP tagged-TLB entries (in case any other hypervisors have been using EPTPs) so
     * that we can avoid an explicit flush while using new VPIDs. We would still need to flush
     * each time while reusing a VPID after hitting the MaxASID limit once.
     */
    if (   pVM
        && pVM->hm.s.fNestedPaging)
    {
        /* We require ALL_CONTEXT flush-type to be available on the CPU. See hmR0VmxSetupTaggedTlb(). */
        Assert(pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
        hmR0VmxFlushEpt(pVM, NULL /* pVCpu */, VMX_FLUSH_EPT_ALL_CONTEXTS);
        pCpu->fFlushAsidBeforeUse = false;
    }
    else
    {
        /** @todo This is still not perfect. If on host resume (pVM is NULL or a VM
         *        without Nested Paging triggered this function) we still have the risk
         *        of potentially running with stale TLB-entries from other hypervisors
         *        when later we use a VM with NestedPaging. To fix this properly we will
         *        have to pass '&g_HvmR0' (see HMR0.cpp) to this function and read
         *        'u64EptVpidCaps' from it. Sigh. */
        pCpu->fFlushAsidBeforeUse = true;
    }

    /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
    ++pCpu->cTlbFlushes;

    return VINF_SUCCESS;
}


/**
 * Deactivates VT-x on the current CPU.
 *
 * @returns VBox status code.
 * @param   pCpu            Pointer to the global CPU info struct.
 * @param   pvCpuPage       Pointer to the VMXON region.
 * @param   HCPhysCpuPage   Physical address of the VMXON region.
 *
 * @remarks This function should never be called when SUPR0EnableVTx() or
 *          similar was used to enable VT-x on the host.
 */
VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
{
    NOREF(pCpu);
    NOREF(pvCpuPage);
    NOREF(HCPhysCpuPage);

    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
    return hmR0VmxLeaveRootMode();
}


/**
 * Sets the permission bits for the specified MSR in the MSR bitmap.
 *
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   uMsr        The MSR value.
 * @param   enmRead     Whether reading this MSR causes a VM-exit.
 * @param   enmWrite    Whether writing this MSR causes a VM-exit.
 */
static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
{
    int32_t iBit;
    uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;

    /*
     * Layout:
     * 0x000 - 0x3ff - Low MSR read bits
     * 0x400 - 0x7ff - High MSR read bits
     * 0x800 - 0xbff - Low MSR write bits
     * 0xc00 - 0xfff - High MSR write bits
     */
    if (uMsr <= 0x00001FFF)
        iBit = uMsr;
    else if (   uMsr >= 0xC0000000
             && uMsr <= 0xC0001FFF)
    {
        iBit = (uMsr - 0xC0000000);
        pbMsrBitmap += 0x400;
    }
    else
    {
        AssertMsgFailed(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
        return;
    }

    Assert(iBit <= 0x1fff);
    if (enmRead == VMXMSREXIT_INTERCEPT_READ)
        ASMBitSet(pbMsrBitmap, iBit);
    else
        ASMBitClear(pbMsrBitmap, iBit);

    if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
        ASMBitSet(pbMsrBitmap + 0x800, iBit);
    else
        ASMBitClear(pbMsrBitmap + 0x800, iBit);
}
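/* Illustrative use (the VMCS setup code elsewhere in this file is expected to
   make passthrough calls of this shape for MSRs such as the SYSENTER ones):
       hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS,
                               VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
   This clears both intercept bits, so guest RDMSR/WRMSR of that MSR no longer
   cause VM-exits. */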


/**
 * Flushes the TLB using EPT.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU (can be NULL depending on @a
 *                      enmFlush).
 * @param   enmFlush    Type of flush.
 */
static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
{
    AssertPtr(pVM);
    Assert(pVM->hm.s.fNestedPaging);

    uint64_t descriptor[2];
    if (enmFlush == VMX_FLUSH_EPT_ALL_CONTEXTS)
        descriptor[0] = 0;
    else
    {
        Assert(pVCpu);
        descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
    }
    descriptor[1] = 0;  /* MBZ. Intel spec. 33.3 "VMX Instructions" */

    int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
    AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0,
                                   rc));
    if (   RT_SUCCESS(rc)
        && pVCpu)
    {
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
    }
}


/**
 * Flushes the TLB using VPID.
 *
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU (can be NULL depending on @a
 *                      enmFlush).
 * @param   enmFlush    Type of flush.
 * @param   GCPtr       Virtual address of the page to flush (can be 0 depending
 *                      on @a enmFlush).
 */
static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr)
{
    AssertPtr(pVM);
    Assert(pVM->hm.s.vmx.fVpid);

    uint64_t descriptor[2];
    if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
    {
        descriptor[0] = 0;
        descriptor[1] = 0;
    }
    else
    {
        AssertPtr(pVCpu);
        AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
        AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
        descriptor[0] = pVCpu->hm.s.uCurrentAsid;
        descriptor[1] = GCPtr;
    }

    int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
    AssertMsg(rc == VINF_SUCCESS,
              ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
    if (   RT_SUCCESS(rc)
        && pVCpu)
    {
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
    }
}


/**
 * Invalidates a guest page by guest virtual address. Only relevant for
 * EPT/VPID, otherwise there is nothing really to invalidate.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCVirt      Guest virtual address of the page to invalidate.
 */
VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
{
    AssertPtr(pVM);
    AssertPtr(pVCpu);
    LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));

    bool fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    if (!fFlushPending)
    {
        /*
         * We must invalidate the guest TLB entry in either case; we cannot ignore it even for
         * the EPT case. See @bugref{6043} and @bugref{6177}.
         *
         * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*()
         * as this function may be called in a loop with individual addresses.
         */
        if (pVM->hm.s.vmx.fVpid)
        {
            if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
            {
                hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
                STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
            }
            else
                VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
        }
        else if (pVM->hm.s.fNestedPaging)
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    }

    return VINF_SUCCESS;
}


/**
 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
 * otherwise there is nothing really to invalidate.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pVCpu       Pointer to the VMCPU.
 * @param   GCPhys      Guest physical address of the page to invalidate.
 */
VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
{
    LogFlowFunc(("%RGp\n", GCPhys));

    /*
     * We cannot flush a page by guest-physical address: INVVPID takes only a linear address
     * while INVEPT flushes entire EPT contexts, not individual addresses. We update the force
     * flag here and flush before the next VM-entry in hmR0VmxFlushTLB*(); this function might
     * be called in a loop. This should cause a flush-by-EPT if EPT is in use. See @bugref{6568}.
     */
    VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
    return VINF_SUCCESS;
}


/**
 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
 * case where neither EPT nor VPID is supported by the CPU.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 *
 * @remarks Called with interrupts disabled.
 */
static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu)
{
    NOREF(pVM);
    AssertPtr(pVCpu);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);

    PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
    AssertPtr(pCpu);

    pVCpu->hm.s.TlbShootdown.cPages = 0;
    pVCpu->hm.s.idLastCpu = pCpu->idCpu;
    pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
    pVCpu->hm.s.fForceTLBFlush = false;
    return;
}


/**
 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 *
 * @remarks All references to "ASID" in this function pertain to "VPID" in
 *          Intel's nomenclature. This avoids confusion in comparisons, since
 *          the host-CPU copies are named "ASID".
 *
 * @remarks Called with interrupts disabled.
 */
static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu)
{
#ifdef VBOX_WITH_STATISTICS
    bool fTlbFlushed = false;
# define HMVMX_SET_TAGGED_TLB_FLUSHED()     do { fTlbFlushed = true; } while (0)
# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT()  do { \
                                                if (!fTlbFlushed) \
                                                    STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
                                            } while (0)
#else
# define HMVMX_SET_TAGGED_TLB_FLUSHED()     do { } while (0)
# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT()  do { } while (0)
#endif

    AssertPtr(pVM);
    AssertPtr(pVCpu);
    AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
              ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
               "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));

    PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
    AssertPtr(pCpu);

    /*
     * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
     * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
     * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
     */
    if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
        || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
    {
        ++pCpu->uCurrentAsid;
        if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
        {
            pCpu->uCurrentAsid = 1;            /* Wraparound to 1; host uses 0. */
            pCpu->cTlbFlushes++;               /* All VCPUs that run on this host CPU must use a new VPID. */
            pCpu->fFlushAsidBeforeUse = true;  /* All VCPUs that run on this host CPU must flush their new VPID before use. */
        }

        pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
        pVCpu->hm.s.idLastCpu    = pCpu->idCpu;
        pVCpu->hm.s.cTlbFlushes  = pCpu->cTlbFlushes;

        /*
         * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
         * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
         */
        hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
        HMVMX_SET_TAGGED_TLB_FLUSHED();
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);  /* Already flushed-by-EPT, skip doing it again below. */
    }

    /* Check for explicit TLB shootdowns. */
    if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
    {
        /*
         * Changes to the EPT paging structures by the VMM require flushing by EPT as the CPU creates
         * guest-physical (only EPT-tagged) mappings while traversing the EPT tables when EPT is in use.
         * Flushing by VPID will only flush linear (only VPID-tagged) and combined (EPT+VPID tagged) mappings
         * but not guest-physical mappings.
         * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}.
         */
        hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
        HMVMX_SET_TAGGED_TLB_FLUSHED();
    }

    /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
     *        not be executed. See hmQueueInvlPage() where it is commented
     *        out. Support individual entry flushing someday. */
    if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
    {
        STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);

        /*
         * Flush individual guest entries using VPID from the TLB or as little as possible with EPT
         * as supported by the CPU.
         */
        if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
        {
            for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
                hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
        }
        else
            hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);

        HMVMX_SET_TAGGED_TLB_FLUSHED();
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
    }

    pVCpu->hm.s.TlbShootdown.cPages = 0;
    pVCpu->hm.s.fForceTLBFlush = false;

    HMVMX_UPDATE_FLUSH_SKIPPED_STAT();

    Assert(pVCpu->hm.s.idLastCpu == pCpu->idCpu);
    Assert(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes);
    AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
              ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
    AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
              ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
    AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
              ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));

    /* Update VMCS with the VPID. */
    int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
    AssertRC(rc);

#undef HMVMX_SET_TAGGED_TLB_FLUSHED
}


/**
 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pVCpu   Pointer to the VMCPU.
 *
 * @remarks Called with interrupts disabled.
 */
static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu)
{
    AssertPtr(pVM);
    AssertPtr(pVCpu);
    AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
    AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));

    PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
    AssertPtr(pCpu);

    /*
     * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
     * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
     */
    if (   pVCpu->hm.s.idLastCpu != pCpu->idCpu
        || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
    {
        pVCpu->hm.s.fForceTLBFlush = true;
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
    }

    /* Check for explicit TLB shootdown flushes. */
    if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
    {
        pVCpu->hm.s.fForceTLBFlush = true;
        STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
    }

    pVCpu->hm.s.idLastCpu = pCpu->idCpu;
    pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;

    if (pVCpu->hm.s.fForceTLBFlush)
    {
        hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
        pVCpu->hm.s.fForceTLBFlush = false;
    }
    else
    {
        /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
         *        not be executed. See hmQueueInvlPage() where it is commented
         *        out. Support individual entry flushing someday. */
        if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
        {
            /* We cannot flush individual entries without VPID support. Flush using EPT. */
            STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
            hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
        }
        else
            STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
    }

    pVCpu->hm.s.TlbShootdown.cPages = 0;
    VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
}

1483
1484/**
1485 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
1486 *
1487 * @returns VBox status code.
1488 * @param pVM Pointer to the VM.
1489 * @param pVCpu Pointer to the VMCPU.
1490 *
1491 * @remarks Called with interrupts disabled.
1492 */
1493static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu)
1494{
1495 AssertPtr(pVM);
1496 AssertPtr(pVCpu);
1497 AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked with VPID disabled."));
1498 AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging enabled"));
1499
1500 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
1501
1502 /*
1503 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
1504 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1505 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1506 */
1507 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1508 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1509 {
1510 pVCpu->hm.s.fForceTLBFlush = true;
1511 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1512 }
1513
1514 /* Check for explicit TLB shootdown flushes. */
1515 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1516 {
1517 /*
1518 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb())
1519 * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
1520 * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush too) - an obscure corner case.
1521 */
1522 pVCpu->hm.s.fForceTLBFlush = true;
1523 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1524 }
1525
1526 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1527 if (pVCpu->hm.s.fForceTLBFlush)
1528 {
1529 ++pCpu->uCurrentAsid;
1530 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1531 {
1532 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
1533 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1534 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1535 }
1536
1537 pVCpu->hm.s.fForceTLBFlush = false;
1538 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1539 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1540 if (pCpu->fFlushAsidBeforeUse)
1541 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1542 }
1543 else
1544 {
1545 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
1546 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
1547 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
1548 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
1549
1550 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1551 * not be executed. See hmQueueInvlPage() where it is commented
1552 * out. Support individual entry flushing someday. */
1553 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1554 {
1555 /* Flush individual guest entries using VPID if the CPU supports flushing by individual address; otherwise flush the whole VPID context. */
1556 if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1557 {
1558 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1559 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1560 }
1561 else
1562 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1563 }
1564 else
1565 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1566 }
1567
1568 pVCpu->hm.s.TlbShootdown.cPages = 0;
1569 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1570
1571 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1572 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1573 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1574 ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
1575 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1576 ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1577
1578 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1579 AssertRC(rc);
1580}
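
/*
 * Illustrative sketch (not compiled; the standalone types and names are hypothetical): the per-host-CPU
 * ASID generation scheme used above, reduced to its essentials. The real state lives in HMGLOBALCPUINFO
 * and the VCPU's HM substate.
 */
#if 0
typedef struct { uint32_t uCurrentAsid, cTlbFlushes; bool fFlushBeforeUse; } HOSTCPUSKETCH;
typedef struct { uint32_t uCurrentAsid, cTlbFlushes; } GUESTCPUSKETCH;

static void sketchAssignNewAsid(HOSTCPUSKETCH *pHost, GUESTCPUSKETCH *pGst, uint32_t uMaxAsid)
{
    if (++pHost->uCurrentAsid >= uMaxAsid)
    {
        pHost->uCurrentAsid    = 1;    /* Wrap around; ASID 0 is reserved for the host. */
        pHost->cTlbFlushes++;          /* Invalidates the ASID cached by every VCPU on this host CPU. */
        pHost->fFlushBeforeUse = true; /* Recycled ASIDs may still have stale TLB entries. */
    }
    pGst->uCurrentAsid = pHost->uCurrentAsid;
    pGst->cTlbFlushes  = pHost->cTlbFlushes;
}
#endif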
1581
1582
1583/**
1584 * Flushes the guest TLB entries based on CPU capabilities.
1585 *
1586 * @param pVCpu Pointer to the VMCPU.
1587 */
1588DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu)
1589{
1590 PVM pVM = pVCpu->CTX_SUFF(pVM);
1591 switch (pVM->hm.s.vmx.uFlushTaggedTlb)
1592 {
1593 case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu); break;
1594 case HMVMX_FLUSH_TAGGED_TLB_EPT: hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu); break;
1595 case HMVMX_FLUSH_TAGGED_TLB_VPID: hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu); break;
1596 case HMVMX_FLUSH_TAGGED_TLB_NONE: hmR0VmxFlushTaggedTlbNone(pVM, pVCpu); break;
1597 default:
1598 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
1599 break;
1600 }
1601}
1602
1603
1604/**
1605 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
1606 * TLB entries from the host TLB before VM-entry.
1607 *
1608 * @returns VBox status code.
1609 * @param pVM Pointer to the VM.
1610 */
1611static int hmR0VmxSetupTaggedTlb(PVM pVM)
1612{
1613 /*
1614 * Determine optimal flush type for Nested Paging.
1615 * We cannot ignore EPT if no suitable flush type is supported by the CPU, as we've already set up unrestricted
1616 * guest execution (see hmR3InitFinalizeR0()).
1617 */
1618 if (pVM->hm.s.fNestedPaging)
1619 {
1620 if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
1621 {
1622 if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
1623 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
1624 else if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1625 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
1626 else
1627 {
1628 /* Shouldn't happen. EPT is supported but no suitable flush types are supported. */
1629 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1630 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1631 }
1632
1633 /* Make sure the write-back cacheable memory type for EPT is supported. */
1634 if (!(pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
1635 {
1636 LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.msr.u64EptVpidCaps));
1637 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1638 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1639 }
1640 }
1641 else
1642 {
1643 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
1644 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1645 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1646 }
1647 }
1648
1649 /*
1650 * Determine optimal flush type for VPID.
1651 */
1652 if (pVM->hm.s.vmx.fVpid)
1653 {
1654 if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
1655 {
1656 if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
1657 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT;
1658 else if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
1659 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS;
1660 else
1661 {
1662 /* Neither SINGLE nor ALL-context flush types for VPID are supported by the CPU. Ignore the VPID capability. */
1663 if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1664 LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
1665 if (pVM->hm.s.vmx.msr.u64EptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1666 LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
1667 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1668 pVM->hm.s.vmx.fVpid = false;
1669 }
1670 }
1671 else
1672 {
1673 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
1674 Log4(("hmR0VmxSetupTaggedTlb: VPID supported without INVEPT support. Ignoring VPID.\n"));
1675 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1676 pVM->hm.s.vmx.fVpid = false;
1677 }
1678 }
1679
1680 /*
1681 * Setup the handler for flushing tagged-TLBs.
1682 */
1683 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
1684 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
1685 else if (pVM->hm.s.fNestedPaging)
1686 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
1687 else if (pVM->hm.s.vmx.fVpid)
1688 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
1689 else
1690 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;
1691 return VINF_SUCCESS;
1692}
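
/*
 * Quick-reference summary of the handler selection above (derived directly from the code):
 *
 *   fNestedPaging  fVpid  -> uFlushTaggedTlb
 *   true           true      HMVMX_FLUSH_TAGGED_TLB_EPT_VPID
 *   true           false     HMVMX_FLUSH_TAGGED_TLB_EPT
 *   false          true      HMVMX_FLUSH_TAGGED_TLB_VPID
 *   false          false     HMVMX_FLUSH_TAGGED_TLB_NONE
 */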
1693
1694
1695/**
1696 * Sets up pin-based VM-execution controls in the VMCS.
1697 *
1698 * @returns VBox status code.
1699 * @param pVM Pointer to the VM.
1700 * @param pVCpu Pointer to the VMCPU.
1701 */
1702static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
1703{
1704 AssertPtr(pVM);
1705 AssertPtr(pVCpu);
1706
1707 uint32_t val = pVM->hm.s.vmx.msr.VmxPinCtls.n.disallowed0; /* Bits set here must always be set. */
1708 uint32_t zap = pVM->hm.s.vmx.msr.VmxPinCtls.n.allowed1; /* Bits cleared here must always be cleared. */
1709
1710 val |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT /* External interrupts cause VM-exits. */
1711 | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT; /* Non-maskable interrupts (NMIs) cause VM-exits. */
1712 Assert(!(val & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI));
1713
1714 /* Enable the VMX preemption timer. */
1715 if (pVM->hm.s.vmx.fUsePreemptTimer)
1716 {
1717 Assert(pVM->hm.s.vmx.msr.VmxPinCtls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
1718 val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
1719 }
1720
1721 if ((val & zap) != val)
1722 {
1723 LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1724 pVM->hm.s.vmx.msr.VmxPinCtls.n.disallowed0, val, zap));
1725 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
1726 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1727 }
1728
1729 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, val);
1730 AssertRCReturn(rc, rc);
1731
1732 /* Update VCPU with the currently set pin-based VM-execution controls. */
1733 pVCpu->hm.s.vmx.u32PinCtls = val;
1734 return rc;
1735}
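
/*
 * Worked example of the (val & zap) consistency check used above and in the other control-field setup
 * routines, with made-up numbers: if allowed1 (zap) is 0x0000007f and we request val = 0x00000089, then
 * (val & zap) yields 0x00000009 != val, i.e. bit 7 is not supported by this CPU and we fail the setup
 * rather than silently dropping the bit.
 */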
1736
1737
1738/**
1739 * Sets up processor-based VM-execution controls in the VMCS.
1740 *
1741 * @returns VBox status code.
1742 * @param pVM Pointer to the VM.
1743 * @param pVCpu Pointer to the VMCPU.
1744 */
1745static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
1746{
1747 AssertPtr(pVM);
1748 AssertPtr(pVCpu);
1749
1750 int rc = VERR_INTERNAL_ERROR_5;
1751 uint32_t val = pVM->hm.s.vmx.msr.VmxProcCtls.n.disallowed0; /* Bits set here must be set in the VMCS. */
1752 uint32_t zap = pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1753
1754 val |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */
1755 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
1756 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
1757 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
1758 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT /* RDPMC causes a VM-exit. */
1759 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT /* MONITOR causes a VM-exit. */
1760 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
1761
1762 /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later; make sure the CPU doesn't force it to be -always- set or -always- clear. */
1763 if ( !(pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
1764 || (pVM->hm.s.vmx.msr.VmxProcCtls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
1765 {
1766 LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
1767 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
1768 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1769 }
1770
1771 /* Without Nested Paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
1772 if (!pVM->hm.s.fNestedPaging)
1773 {
1774 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
1775 val |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
1776 | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
1777 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
1778 }
1779
1780 /* Use TPR shadowing if supported by the CPU. */
1781 if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
1782 {
1783 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
1784 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
1785 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
1786 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
1787 AssertRCReturn(rc, rc);
1788
1789 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
1790 /* CR8 writes cause VM-exits based on the TPR threshold. */
1791 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
1792 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
1793 }
1794 else
1795 {
1796 val |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT /* CR8 reads cause VM-exits. */
1797 | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT; /* CR8 writes cause VM-exits. */
1798 }
1799
1800 /* Use MSR-bitmaps if supported by the CPU. */
1801 if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1802 {
1803 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
1804
1805 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1806 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
1807 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1808 AssertRCReturn(rc, rc);
1809
1810 /*
1811 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
1812 * automatically (either as part of the MSR-load/store areas or dedicated fields in the VMCS).
1813 */
1814 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1815 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1816 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1817 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1818 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1819 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1820 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1821 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1822 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1823 }
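
/*
 * For reference: the MSR bitmap is a 4 KB page split into four 1 KB regions (read-low, read-high,
 * write-low, write-high) covering MSRs 0x0-0x1fff and 0xc0000000-0xc0001fff; a clear bit means the
 * corresponding access does not cause a VM-exit. See Intel spec. 24.6.9 "MSR-Bitmap Address".
 */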
1824
1825 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
1826 if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1827 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
1828
1829 if ((val & zap) != val)
1830 {
1831 LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1832 pVM->hm.s.vmx.msr.VmxProcCtls.n.disallowed0, val, zap));
1833 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
1834 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1835 }
1836
1837 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);
1838 AssertRCReturn(rc, rc);
1839
1840 /* Update VCPU with the currently set processor-based VM-execution controls. */
1841 pVCpu->hm.s.vmx.u32ProcCtls = val;
1842
1843 /*
1844 * Secondary processor-based VM-execution controls.
1845 */
1846 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
1847 {
1848 val = pVM->hm.s.vmx.msr.VmxProcCtls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
1849 zap = pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1850
1851 if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
1852 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */
1853
1854 if (pVM->hm.s.fNestedPaging)
1855 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */
1856 else
1857 {
1858 /*
1859 * Without Nested Paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
1860 * VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT when INVPCID is executed by the guest.
1861 * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
1862 */
1863 if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
1864 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
1865 }
1866
1867 if (pVM->hm.s.vmx.fVpid)
1868 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */
1869
1870 if (pVM->hm.s.vmx.fUnrestrictedGuest)
1871 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */
1872
1873 /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
1874 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
1875 * done dynamically. */
1876 if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
1877 {
1878 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
1879 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
1880 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
1881 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
1882 AssertRCReturn(rc, rc);
1883 }
1884
1885 if (pVM->hm.s.vmx.msr.VmxProcCtls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
1886 {
1887 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */
1888 if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1889 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1890 }
1891
1892 if ((val & zap) != val)
1893 {
1894 LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! "
1895 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.msr.VmxProcCtls2.n.disallowed0, val, zap));
1896 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1897 }
1898
1899 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, val);
1900 AssertRCReturn(rc, rc);
1901
1902 /* Update VCPU with the currently set secondary processor-based VM-execution controls. */
1903 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
1904 }
1905 else if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
1906 {
1907 LogRel(("hmR0VmxSetupProcCtls: Unrestricted Guest set as true when secondary processor-based VM-execution controls not "
1908 "available\n"));
1909 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1910 }
1911
1912 return VINF_SUCCESS;
1913}
1914
1915
1916/**
1917 * Sets up miscellaneous (everything other than Pin & Processor-based
1918 * VM-execution) control fields in the VMCS.
1919 *
1920 * @returns VBox status code.
1921 * @param pVM Pointer to the VM.
1922 * @param pVCpu Pointer to the VMCPU.
1923 */
1924static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
1925{
1926 AssertPtr(pVM);
1927 AssertPtr(pVCpu);
1928
1929 int rc = VERR_GENERAL_FAILURE;
1930
1931 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
1932#if 0
1933 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestControlRegs()). */
1934 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0); AssertRCReturn(rc, rc);
1935 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0); AssertRCReturn(rc, rc);
1936
1937 /*
1938 * Set MASK & MATCH to 0. VMX checks if GuestPFErrCode & MASK == MATCH. If equal (in our case it always is)
1939 * and if the X86_XCPT_PF bit in the exception bitmap is set it causes a VM-exit, if clear doesn't cause an exit.
1940 * We thus use the exception bitmap to control it rather than use both.
1941 */
1942 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0); AssertRCReturn(rc, rc);
1943 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0); AssertRCReturn(rc, rc);
1944
1945 /** @todo Explore possibility of using IO-bitmaps. */
1946 /* All IO & IOIO instructions cause VM-exits. */
1947 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0); AssertRCReturn(rc, rc);
1948 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0); AssertRCReturn(rc, rc);
1949
1950 /* Initialize the MSR-bitmap area. */
1951 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
1952 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0); AssertRCReturn(rc, rc);
1953 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
1954#endif
1955
1956#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
1957 /* Setup MSR autoloading/storing. */
1958 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
1959 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
1960 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1961 AssertRCReturn(rc, rc);
1962 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1963 AssertRCReturn(rc, rc);
1964
1965 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
1966 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
1967 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
1968 AssertRCReturn(rc, rc);
1969#endif
1970
1971 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
1972 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
1973 AssertRCReturn(rc, rc);
1974
1975 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
1976#if 0
1977 /* Setup debug controls */
1978 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
1979 AssertRCReturn(rc, rc);
1980 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
1981 AssertRCReturn(rc, rc);
1982#endif
1983
1984 return rc;
1985}
1986
1987
1988/**
1989 * Sets up the initial exception bitmap in the VMCS based on static conditions
1990 * (i.e. conditions that cannot ever change at runtime).
1991 *
1992 * @returns VBox status code.
1993 * @param pVM Pointer to the VM.
1994 * @param pVCpu Pointer to the VMCPU.
1995 */
1996static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
1997{
1998 AssertPtr(pVM);
1999 AssertPtr(pVCpu);
2000
2001 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
2002
2003 uint32_t u32XcptBitmap = 0;
2004
2005 /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
2006 if (!pVM->hm.s.fNestedPaging)
2007 u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
2008
2009 pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
2010 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
2011 AssertRCReturn(rc, rc);
2012 return rc;
2013}
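
/*
 * Hypothetical usage sketch (not compiled, fragment only): runtime code that wanted to intercept an
 * additional exception, say #GP, would extend the cached bitmap and rewrite the VMCS field along
 * these lines:
 */
#if 0
pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_GP);
rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
AssertRCReturn(rc, rc);
#endif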
2014
2015
2016/**
2017 * Sets up the initial guest-state mask. The guest-state mask is consulted
2018 * before reading guest-state fields from the VMCS, as VMREADs can be expensive
2019 * under nested virtualization (each one would cause a VM-exit).
2020 *
2021 * @param pVCpu Pointer to the VMCPU.
2022 */
2023static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
2024{
2025 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
2026 pVCpu->hm.s.vmx.fUpdatedGuestState = HMVMX_UPDATED_GUEST_ALL;
2027 return VINF_SUCCESS;
2028}
2029
2030
2031/**
2032 * Does per-VM VT-x initialization.
2033 *
2034 * @returns VBox status code.
2035 * @param pVM Pointer to the VM.
2036 */
2037VMMR0DECL(int) VMXR0InitVM(PVM pVM)
2038{
2039 LogFlowFunc(("pVM=%p\n", pVM));
2040
2041 int rc = hmR0VmxStructsAlloc(pVM);
2042 if (RT_FAILURE(rc))
2043 {
2044 LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
2045 return rc;
2046 }
2047
2048 return VINF_SUCCESS;
2049}
2050
2051
2052/**
2053 * Does per-VM VT-x termination.
2054 *
2055 * @returns VBox status code.
2056 * @param pVM Pointer to the VM.
2057 */
2058VMMR0DECL(int) VMXR0TermVM(PVM pVM)
2059{
2060 LogFlowFunc(("pVM=%p\n", pVM));
2061
2062#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2063 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
2064 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
2065#endif
2066 hmR0VmxStructsFree(pVM);
2067 return VINF_SUCCESS;
2068}
2069
2070
2071/**
2072 * Sets up the VM for execution under VT-x.
2073 * This function is only called once per-VM during initialization.
2074 *
2075 * @returns VBox status code.
2076 * @param pVM Pointer to the VM.
2077 */
2078VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
2079{
2080 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
2081 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2082
2083 LogFlowFunc(("pVM=%p\n", pVM));
2084
2085 /*
2086 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
2087 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0().
2088 */
2089 /* -XXX- change hmR3InitFinalizeR0Intel() to fail if pRealModeTSS alloc fails. */
2090 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
2091 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
2092 || !pVM->hm.s.vmx.pRealModeTSS))
2093 {
2094 LogRel(("VMXR0SetupVM: invalid real-on-v86 state.\n"));
2095 return VERR_INTERNAL_ERROR;
2096 }
2097
2098#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2099 /*
2100 * This is for the darwin 32-bit/PAE kernels trying to execute 64-bit guests. We don't bother with
2101 * the 32<->64 switcher in this case. This is a rare, legacy use-case with barely any test coverage.
2102 */
2103 if ( pVM->hm.s.fAllow64BitGuests
2104 && !HMVMX_IS_64BIT_HOST_MODE())
2105 {
2106 LogRel(("VMXR0SetupVM: Unsupported guest and host paging mode combination.\n"));
2107 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
2108 }
2109#endif
2110
2111 /* Initialize these always, see hmR3InitFinalizeR0().*/
2112 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NONE;
2113 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NONE;
2114
2115 /* Setup the tagged-TLB flush handlers. */
2116 int rc = hmR0VmxSetupTaggedTlb(pVM);
2117 if (RT_FAILURE(rc))
2118 {
2119 LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
2120 return rc;
2121 }
2122
2123 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2124 {
2125 PVMCPU pVCpu = &pVM->aCpus[i];
2126 AssertPtr(pVCpu);
2127 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
2128
2129 /* Log the VCPU pointers, useful for debugging SMP VMs. */
2130 Log4(("VMXR0SetupVM: pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
2131
2132 /* Set revision dword at the beginning of the VMCS structure. */
2133 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.u64BasicInfo);
2134
2135 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
2136 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2137 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2138 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2139
2140 /* Load this VMCS as the current VMCS. */
2141 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2142 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2143 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2144
2145 rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
2146 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2147 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2148
2149 rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
2150 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2151 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2152
2153 rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
2154 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2155 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2156
2157 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
2158 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2159 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2160
2161 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
2162 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2163 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2164
2165#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2166 rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
2167 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2168 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2169#endif
2170
2171 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
2172 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2173 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2174 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2175
2176 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
2177
2178 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
2179 }
2180
2181 return VINF_SUCCESS;
2182}
2183
2184
2185/**
2186 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2187 * the VMCS.
2188 *
2189 * @returns VBox status code.
2190 * @param pVM Pointer to the VM.
2191 * @param pVCpu Pointer to the VMCPU.
2192 */
2193DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
2194{
2195 RTCCUINTREG uReg = ASMGetCR0();
2196 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2197 AssertRCReturn(rc, rc);
2198
2199#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2200 /* For the darwin 32-bit hybrid kernel, we need the 64-bit CR3 as it uses 64-bit paging. */
2201 if (HMVMX_IS_64BIT_HOST_MODE())
2202 {
2203 uint64_t uRegCR3 = HMR0Get64bitCR3();
2204 rc = VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uRegCR3);
2205 }
2206 else
2207#endif
2208 {
2209 uReg = ASMGetCR3();
2210 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2211 }
2212 AssertRCReturn(rc, rc);
2213
2214 uReg = ASMGetCR4();
2215 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2216 AssertRCReturn(rc, rc);
2217 return rc;
2218}
2219
2220
2221/**
2222 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
2223 * the host-state area in the VMCS.
2224 *
2225 * @returns VBox status code.
2226 * @param pVM Pointer to the VM.
2227 * @param pVCpu Pointer to the VMCPU.
2228 */
2229DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
2230{
2231 int rc = VERR_INTERNAL_ERROR_5;
2232 RTSEL uSelDS = 0;
2233 RTSEL uSelES = 0;
2234 RTSEL uSelFS = 0;
2235 RTSEL uSelGS = 0;
2236 RTSEL uSelTR = 0;
2237
2238 /*
2239 * Host DS, ES, FS and GS segment registers.
2240 */
2241#if HC_ARCH_BITS == 64
2242 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
2243 uSelDS = ASMGetDS();
2244 uSelES = ASMGetES();
2245 uSelFS = ASMGetFS();
2246 uSelGS = ASMGetGS();
2247#endif
2248
2249 /*
2250 * Host CS and SS segment registers.
2251 */
2252 RTSEL uSelCS;
2253 RTSEL uSelSS;
2254#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2255 if (HMVMX_IS_64BIT_HOST_MODE())
2256 {
2257 uSelCS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelCS;
2258 uSelSS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelSS;
2259 }
2260 else
2261 {
2262 /* Seems darwin uses the LDT (TI flag is set) in the CS & SS selectors which VT-x doesn't like. */
2263 uSelCS = (RTSEL)(uintptr_t)&SUPR0AbsKernelCS;
2264 uSelSS = (RTSEL)(uintptr_t)&SUPR0AbsKernelSS;
2265 }
2266#else
2267 uSelCS = ASMGetCS();
2268 uSelSS = ASMGetSS();
2269#endif
2270
2271 /*
2272 * Host TR segment register.
2273 */
2274 uSelTR = ASMGetTR();
2275
2276#if HC_ARCH_BITS == 64
2277 /*
2278 * Determine if the host segment registers are suitable for VT-x. Otherwise load zero into them to gain VM-entry
2279 * and restore them before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
2280 */
2281 if (uSelDS & (X86_SEL_RPL | X86_SEL_LDT))
2282 {
2283 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_DS;
2284 pVCpu->hm.s.vmx.RestoreHost.uHostSelDS = uSelDS;
2285 uSelDS = 0;
2286 }
2287 if (uSelES & (X86_SEL_RPL | X86_SEL_LDT))
2288 {
2289 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_ES;
2290 pVCpu->hm.s.vmx.RestoreHost.uHostSelES = uSelES;
2291 uSelES = 0;
2292 }
2293 if (uSelFS & (X86_SEL_RPL | X86_SEL_LDT))
2294 {
2295 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_FS;
2296 pVCpu->hm.s.vmx.RestoreHost.uHostSelFS = uSelFS;
2297 uSelFS = 0;
2298 }
2299 if (uSelGS & (X86_SEL_RPL | X86_SEL_LDT))
2300 {
2301 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_GS;
2302 pVCpu->hm.s.vmx.RestoreHost.uHostSelGS = uSelGS;
2303 uSelGS = 0;
2304 }
2305#endif
2306
2307 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2308 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2309 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2310 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2311 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2312 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2313 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2314 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));
2315 Assert(uSelCS);
2316 Assert(uSelTR);
2317
2318 /* Assertion is right but we would not have updated u32ExitCtls yet. */
2319#if 0
2320 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE))
2321 Assert(uSelSS != 0);
2322#endif
2323
2324 /* Write these host selector fields into the host-state area in the VMCS. */
2325 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS); AssertRCReturn(rc, rc);
2326 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS); AssertRCReturn(rc, rc);
2327#if HC_ARCH_BITS == 64
2328 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS); AssertRCReturn(rc, rc);
2329 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES); AssertRCReturn(rc, rc);
2330 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS); AssertRCReturn(rc, rc);
2331 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS); AssertRCReturn(rc, rc);
2332#endif
2333 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR); AssertRCReturn(rc, rc);
2334
2335 /*
2336 * Host GDTR and IDTR.
2337 */
2338 RTGDTR Gdtr;
2339 RT_ZERO(Gdtr);
2340#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2341 if (HMVMX_IS_64BIT_HOST_MODE())
2342 {
2343 X86XDTR64 Gdtr64;
2344 X86XDTR64 Idtr64;
2345 HMR0Get64bitGdtrAndIdtr(&Gdtr64, &Idtr64);
2346 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr); AssertRCReturn(rc, rc);
2347 rc = VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr); AssertRCReturn(rc, rc);
2348
2349 Gdtr.cbGdt = Gdtr64.cb;
2350 Gdtr.pGdt = (uintptr_t)Gdtr64.uAddr;
2351 }
2352 else
2353#endif
2354 {
2355 RTIDTR Idtr;
2356 ASMGetGDTR(&Gdtr);
2357 ASMGetIDTR(&Idtr);
2358 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt); AssertRCReturn(rc, rc);
2359 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt); AssertRCReturn(rc, rc);
2360
2361#if HC_ARCH_BITS == 64
2362 /*
2363 * Determine if we need to manually restore the GDTR and IDTR limits, as VT-x zaps them to the
2364 * maximum limit (0xffff) on every VM-exit.
2365 */
2366 if (Gdtr.cbGdt != 0xffff)
2367 {
2368 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
2369 AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
2370 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
2371 }
2372
2373 /*
2374 * The IDT limit is practically at most 0xfff (256 gates of 16 bytes each). If the host limit is already 0xfff,
2375 * VT-x bloating the limit to 0xffff is not a problem, as the extra bytes can never be referenced anyway. See
2376 * Intel spec. 6.14.1 "64-Bit Mode IDT" and Intel spec. 6.2 "Exception and Interrupt Vectors".
2377 */
2378 if (Idtr.cbIdt < 0x0fff)
2379 {
2380 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
2381 AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
2382 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
2383 }
2384#endif
2385 }
2386
2387 /*
2388 * Host TR base. Verify that the TR selector doesn't point past the GDT. Masking off the TI and RPL bits gives the
2389 * descriptor's byte offset, effectively what the CPU does when "scaling by 8". TI is always 0; RPL should mostly be too.
2390 */
2391 if ((uSelTR & X86_SEL_MASK) > Gdtr.cbGdt)
2392 {
2393 AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
2394 return VERR_VMX_INVALID_HOST_STATE;
2395 }
2396
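/*
 * Worked example with a made-up selector: for uSelTR = 0x0040 the TI and RPL bits (stripped by X86_SEL_MASK)
 * are zero, so the descriptor lives at byte offset 0x40 into the GDT, i.e. GDT entry 8 (0x40 / 8).
 */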
2397 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
2398#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2399 if (HMVMX_IS_64BIT_HOST_MODE())
2400 {
2401 /* We need the 64-bit TR base for hybrid darwin. */
2402 uint64_t u64TRBase = X86DESC64_BASE((PX86DESC64)pDesc);
2403 rc = VMXWriteVmcs64(VMX_VMCS_HOST_TR_BASE, u64TRBase);
2404 }
2405 else
2406#endif
2407 {
2408 uintptr_t uTRBase;
2409#if HC_ARCH_BITS == 64
2410 uTRBase = X86DESC64_BASE(pDesc);
2411
2412 /*
2413 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on all VM-exits.
2414 * The type is the same for 64-bit busy TSS[1]. The limit needs manual restoration if the host has something else.
2415 * Task switching is not supported in 64-bit mode[2], but the limit still matters as IOPM is supported in 64-bit mode.
2416 * Restoring the limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
2417 *
2418 * [1] See Intel spec. 3.5 "System Descriptor Types".
2419 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
2420 */
2421 Assert(pDesc->System.u4Type == 11);
2422 if ( pDesc->System.u16LimitLow != 0x67
2423 || pDesc->System.u4LimitHigh)
2424 {
2425 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
2426 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
2427
2428 /* Store the GDTR here as we need it while restoring TR. */
2429 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
2430 }
2431#else
2432 uTRBase = X86DESC_BASE(pDesc);
2433#endif
2434 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
2435 }
2436 AssertRCReturn(rc, rc);
2437
2438 /*
2439 * Host FS base and GS base.
2440 */
2441#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2442 if (HMVMX_IS_64BIT_HOST_MODE())
2443 {
2444 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
2445 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
2446 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase); AssertRCReturn(rc, rc);
2447 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase); AssertRCReturn(rc, rc);
2448
2449# if HC_ARCH_BITS == 64
2450 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
2451 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
2452 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
2453 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
2454 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
2455# endif
2456 }
2457#endif
2458 return rc;
2459}
2460
2461
2462/**
2463 * Saves certain host MSRs in the VM-Exit MSR-load area and some in the
2464 * host-state area of the VMCS. These MSRs will be automatically restored on
2465 * the host after every successful VM-exit.
2466 *
2467 * @returns VBox status code.
2468 * @param pVM Pointer to the VM.
2469 * @param pVCpu Pointer to the VMCPU.
2470 */
2471DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
2472{
2473 AssertPtr(pVCpu);
2474 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
2475
2476 int rc = VINF_SUCCESS;
2477#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
2478 PVMXAUTOMSR pHostMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvHostMsr;
2479 uint32_t cHostMsrs = 0;
2480 uint32_t u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
2481
2482 if (u32HostExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
2483 {
2484 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
2485
2486# if HC_ARCH_BITS == 64
2487 /* Paranoia. 64-bit code requires these bits to be set always. */
2488 Assert((u64HostEfer & (MSR_K6_EFER_LMA | MSR_K6_EFER_LME)) == (MSR_K6_EFER_LMA | MSR_K6_EFER_LME));
2489
2490 /*
2491 * We currently do not save/restore host EFER; we just make sure it doesn't get modified by VT-x operation.
2492 * All guest accesses (reads, writes) of EFER cause VM-exits. If we were to conditionally load the guest EFER for
2493 * some reason (e.g. to allow transparent reads) we would activate the code below.
2494 */
2495# if 0
2496 /* All our supported 64-bit host platforms must have NXE bit set. Otherwise we can change the below code to save EFER. */
2497 Assert(u64HostEfer & (MSR_K6_EFER_NXE));
2498 /* The SCE bit is only applicable in 64-bit mode. Save EFER if it doesn't match what the guest has.
2499 See Intel spec. 30.10.4.3 "Handling the SYSCALL and SYSRET Instructions". */
2500 if (CPUMIsGuestInLongMode(pVCpu))
2501 {
2502 uint64_t u64GuestEfer;
2503 rc = CPUMQueryGuestMsr(pVCpu, MSR_K6_EFER, &u64GuestEfer);
2504 AssertRC(rc);
2505
2506 if ((u64HostEfer & MSR_K6_EFER_SCE) != (u64GuestEfer & MSR_K6_EFER_SCE))
2507 {
2508 pHostMsr->u32Msr = MSR_K6_EFER;
2509 pHostMsr->u32Reserved = 0;
2510 pHostMsr->u64Value = u64HostEfer;
2511 pHostMsr++; cHostMsrs++;
2512 }
2513 }
2514# endif
2515# else /* HC_ARCH_BITS != 64 */
2516 pHostMsr->u32Msr = MSR_K6_EFER;
2517 pHostMsr->u32Reserved = 0;
2518# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2519 if (CPUMIsGuestInLongMode(pVCpu))
2520 {
2521 /* Must match the EFER value in our 64 bits switcher. */
2522 pHostMsr->u64Value = u64HostEfer | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
2523 }
2524 else
2525# endif
2526 pHostMsr->u64Value = u64HostEfer;
2527 pHostMsr++; cHostMsrs++;
2528# endif /* HC_ARCH_BITS == 64 */
2529 }
2530
2531# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2532 if (HMVMX_IS_64BIT_HOST_MODE())
2533 {
2534 pHostMsr->u32Msr = MSR_K6_STAR;
2535 pHostMsr->u32Reserved = 0;
2536 pHostMsr->u64Value = ASMRdMsr(MSR_K6_STAR); /* legacy syscall eip, cs & ss */
2537 pHostMsr++; cHostMsrs++;
2538 pHostMsr->u32Msr = MSR_K8_LSTAR;
2539 pHostMsr->u32Reserved = 0;
2540 pHostMsr->u64Value = ASMRdMsr(MSR_K8_LSTAR); /* 64-bit mode syscall rip */
2541 pHostMsr++; cHostMsrs++;
2542 pHostMsr->u32Msr = MSR_K8_SF_MASK;
2543 pHostMsr->u32Reserved = 0;
2544 pHostMsr->u64Value = ASMRdMsr(MSR_K8_SF_MASK); /* syscall flag mask */
2545 pHostMsr++; cHostMsrs++;
2546 pHostMsr->u32Msr = MSR_K8_KERNEL_GS_BASE;
2547 pHostMsr->u32Reserved = 0;
2548 pHostMsr->u64Value = ASMRdMsr(MSR_K8_KERNEL_GS_BASE); /* swapgs exchange value */
2549 pHostMsr++; cHostMsrs++;
2550 }
2551# endif
2552
2553 /* Shouldn't ever happen, but the CPU does advertise a limit. We're well within the recommended 512. */
2554 if (RT_UNLIKELY(cHostMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.u64Misc)))
2555 {
2556 LogRel(("cHostMsrs=%u Cpu=%u\n", cHostMsrs, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.u64Misc)));
2557 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_HOST_MSR_STORAGE;
2558 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2559 }
2560
2561 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
2562#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
2563
2564 /*
2565 * Host Sysenter MSRs.
2566 */
2567 rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
2568 AssertRCReturn(rc, rc);
2569#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2570 if (HMVMX_IS_64BIT_HOST_MODE())
2571 {
2572 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2573 AssertRCReturn(rc, rc);
2574 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2575 }
2576 else
2577 {
2578 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2579 AssertRCReturn(rc, rc);
2580 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2581 }
2582#elif HC_ARCH_BITS == 32
2583 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2584 AssertRCReturn(rc, rc);
2585 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2586#else
2587 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2588 AssertRCReturn(rc, rc);
2589 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2590#endif
2591 AssertRCReturn(rc, rc);
2592
2593 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT, IA32_EFER, also see
2594 * hmR0VmxSetupExitCtls() !! */
2595 return rc;
2596}
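
/*
 * Illustrative fragment (not compiled, in the style of hmR0VmxSaveHostMsrs above): appending one more MSR
 * to the auto-load/store area follows the same three-field pattern, and the entry count must be written
 * back afterwards. The choice of MSR_K8_TSC_AUX here is arbitrary.
 */
#if 0
pHostMsr->u32Msr      = MSR_K8_TSC_AUX;
pHostMsr->u32Reserved = 0;
pHostMsr->u64Value    = ASMRdMsr(MSR_K8_TSC_AUX);
pHostMsr++; cHostMsrs++;
rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
#endif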
2597
2598
2599/**
2600 * Sets up VM-entry controls in the VMCS. These controls can affect things done
2601 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
2602 * controls".
2603 *
2604 * @returns VBox status code.
2605 * @param pVCpu Pointer to the VMCPU.
2606 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2607 * out-of-sync. Make sure to update the required fields
2608 * before using them.
2609 *
2610 * @remarks No-long-jump zone!!!
2611 */
2612DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2613{
2614 int rc = VINF_SUCCESS;
2615 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_ENTRY_CTLS)
2616 {
2617 PVM pVM = pVCpu->CTX_SUFF(pVM);
2618 uint32_t val = pVM->hm.s.vmx.msr.VmxEntry.n.disallowed0; /* Bits set here must be set in the VMCS. */
2619 uint32_t zap = pVM->hm.s.vmx.msr.VmxEntry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2620
2621 /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
2622 val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
2623
2624 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
2625 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2626 val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
2627 else
2628 Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
2629
2630 /*
2631 * The following should not be set (since we're not in SMM mode):
2632 * - VMX_VMCS_CTRL_ENTRY_ENTRY_SMM
2633 * - VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON
2634 */
2635
2636 /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
2637 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR,
2638 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR */
2639
2640 if ((val & zap) != val)
2641 {
2642 LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2643 pVM->hm.s.vmx.msr.VmxEntry.n.disallowed0, val, zap));
2644 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
2645 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2646 }
2647
2648 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, val);
2649 AssertRCReturn(rc, rc);
2650
2651 /* Update VCPU with the currently set VM-entry controls. */
2652 pVCpu->hm.s.vmx.u32EntryCtls = val;
2653 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_ENTRY_CTLS;
2654 }
2655 return rc;
2656}
2657
2658
2659/**
2660 * Sets up the VM-exit controls in the VMCS.
2661 *
2662 * @returns VBox status code.
2664 * @param pVCpu Pointer to the VMCPU.
2665 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2666 * out-of-sync. Make sure to update the required fields
2667 * before using them.
2668 *
2669 * @remarks requires EFER.
2670 */
2671DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2672{
2673 int rc = VINF_SUCCESS;
2674 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_EXIT_CTLS)
2675 {
2676 PVM pVM = pVCpu->CTX_SUFF(pVM);
2677 uint32_t val = pVM->hm.s.vmx.msr.VmxExit.n.disallowed0; /* Bits set here must be set in the VMCS. */
2678 uint32_t zap = pVM->hm.s.vmx.msr.VmxExit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2679
2680 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
2681 val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
2682
2683 /*
2684 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary.
2685 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in hmR0VmxSaveHostMsrs().
2686 */
2687#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2688 if (HMVMX_IS_64BIT_HOST_MODE())
2689 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
2690 else
2691 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
2692#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
2693 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2694 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE; /* The switcher goes to long mode. */
2695 else
2696 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
2697#endif
2698
2699 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
2700 Assert(!(val & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
2701
2702 /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
2703 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
2704 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR,
2705 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR,
2706 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR. */
2707
2708 if (pVM->hm.s.vmx.msr.VmxExit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)
2709 val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
2710
2711 if ((val & zap) != val)
2712 {
2713 LogRel(("hmR0VmxSetupProcCtls: invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2714 pVM->hm.s.vmx.msr.VmxExit.n.disallowed0, val, zap));
2715 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
2716 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2717 }
2718
2719 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, val);
2720 AssertRCReturn(rc, rc);
2721
2722 /* Update VCPU with the currently set VM-exit controls. */
2723 pVCpu->hm.s.vmx.u32ExitCtls = val;
2724 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_EXIT_CTLS;
2725 }
2726 return rc;
2727}
2728
2729
2730/**
2731 * Loads the guest APIC and related state.
2732 *
2733 * @returns VBox status code.
2735 * @param pVCpu Pointer to the VMCPU.
2736 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2737 * out-of-sync. Make sure to update the required fields
2738 * before using them.
2739 */
2740DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2741{
2742 int rc = VINF_SUCCESS;
2743 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_APIC_STATE)
2744 {
2745 /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
2746 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
2747 {
2748 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2749
2750 bool fPendingIntr = false;
2751 uint8_t u8Tpr = 0;
2752 uint8_t u8PendingIntr = 0;
2753 rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
2754 AssertRCReturn(rc, rc);
2755
2756 /*
2757 * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
2758 * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
2759 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
2760 * the interrupt when we VM-exit for other reasons.
2761 */
2762 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
2763 uint32_t u32TprThreshold = 0;
2764 if (fPendingIntr)
2765 {
2766 /* Bits 3-0 of the TPR threshold field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
2767 const uint8_t u8PendingPriority = (u8PendingIntr >> 4);
2768 const uint8_t u8TprPriority = (u8Tpr >> 4) & 7;
2769 if (u8PendingPriority <= u8TprPriority)
2770 u32TprThreshold = u8PendingPriority;
2771 else
2772 u32TprThreshold = u8TprPriority; /* Required for Vista 64-bit guest, see @bugref{6398}. */
2773 }
2774 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
2775
2776 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
2777 AssertRCReturn(rc, rc);
2778 }
2779
2780 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_APIC_STATE;
2781 }
2782 return rc;
2783}
2784
2785
2786/**
2787 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
2788 *
2789 * @returns Guest's interruptibility-state.
2790 * @param pVCpu Pointer to the VMCPU.
2791 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2792 * out-of-sync. Make sure to update the required fields
2793 * before using them.
2794 *
2795 * @remarks No-long-jump zone!!!
2796 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
2797 */
2798DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2799{
2800 /*
2801 * Instructions like STI and MOV SS inhibit interrupts until the next instruction completes. Check if we should
2802 * inhibit interrupts or clear any existing interrupt-inhibition.
2803 */
2804 uint32_t uIntrState = 0;
2805 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2806 {
2807 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
2808 AssertMsg((pVCpu->hm.s.vmx.fUpdatedGuestState & (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS))
2809 == (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS), ("%#x\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
2810 if (pMixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
2811 {
2812 /*
2813 * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
2814 * VT-x, the flag's condition to be cleared is met and thus the cleared state is correct.
2815 */
2816 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2817 }
2818 else if (pMixedCtx->eflags.Bits.u1IF)
2819 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
2820 else
2821 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
2822 }
2823 return uIntrState;
2824}
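
/*
 * Example of the interrupt shadow handled above: after a guest executes "sti; hlt" with RFLAGS.IF previously
 * clear, external interrupts remain blocked for one more instruction, so a pending interrupt must not be
 * injected before the HLT executes; reporting BLOCK_STI in the interruptibility-state conveys that to the CPU.
 */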
2825
2826
2827/**
2828 * Loads the guest's interruptibility-state into the guest-state area in the
2829 * VMCS.
2830 *
2831 * @returns VBox status code.
2832 * @param pVCpu Pointer to the VMCPU.
2833 * @param uIntrState The interruptibility-state to set.
2834 */
2835static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
2836{
2837 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */
2838 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
2839 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
2840 AssertRCReturn(rc, rc);
2841 return rc;
2842}
2843
2844
2845/**
2846 * Loads the guest's RIP into the guest-state area in the VMCS.
2847 *
2848 * @returns VBox status code.
2849 * @param pVCpu Pointer to the VMCPU.
2850 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2851 * out-of-sync. Make sure to update the required fields
2852 * before using them.
2853 *
2854 * @remarks No-long-jump zone!!!
2855 */
2856static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2857{
2858 int rc = VINF_SUCCESS;
2859 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)
2860 {
2861 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
2862 AssertRCReturn(rc, rc);
2863 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
2864 Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#x\n", pMixedCtx->rip, pVCpu->hm.s.fContextUseFlags));
2865 }
2866 return rc;
2867}
2868
2869
2870/**
2871 * Loads the guest's RSP into the guest-state area in the VMCS.
2872 *
2873 * @returns VBox status code.
2874 * @param pVCpu Pointer to the VMCPU.
2875 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2876 * out-of-sync. Make sure to update the required fields
2877 * before using them.
2878 *
2879 * @remarks No-long-jump zone!!!
2880 */
2881static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2882{
2883 int rc = VINF_SUCCESS;
2884 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RSP)
2885 {
2886 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
2887 AssertRCReturn(rc, rc);
2888 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP;
2889 Log4(("Load: VMX_VMCS_GUEST_RSP=%#RX64\n", pMixedCtx->rsp));
2890 }
2891 return rc;
2892}
2893
2894
2895/**
2896 * Loads the guest's RFLAGS into the guest-state area in the VMCS.
2897 *
2898 * @returns VBox status code.
2899 * @param pVCpu Pointer to the VMCPU.
2900 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2901 * out-of-sync. Make sure to update the required fields
2902 * before using them.
2903 *
2904 * @remarks No-long-jump zone!!!
2905 */
2906static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2907{
2908 int rc = VINF_SUCCESS;
2909 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
2910 {
2911 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
2912 Let us assert it as such and use 32-bit VMWRITE. */
2913 Assert(!(pMixedCtx->rflags.u64 >> 32));
2914 X86EFLAGS Eflags = pMixedCtx->eflags;
2915 Eflags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */
2916 Eflags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */
2917
2918 /*
2919 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM exit.
2920 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
2921 */
2922 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2923 {
2924 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2925 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2926 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = Eflags.u32; /* Save the original eflags of the real-mode guest. */
2927 Eflags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
2928 Eflags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
2929 }
2930
2931 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, Eflags.u32);
2932 AssertRCReturn(rc, rc);
2933
2934 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
2935 Log4(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", Eflags.u32));
2936 }
2937 return rc;
2938}
2939
2940
2941/**
2942 * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
2943 *
2944 * @returns VBox status code.
2945 * @param pVCpu Pointer to the VMCPU.
2946 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2947 * out-of-sync. Make sure to update the required fields
2948 * before using them.
2949 *
2950 * @remarks No-long-jump zone!!!
2951 */
2952DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2953{
2954 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
2955 AssertRCReturn(rc, rc);
2956 rc = hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
2957 AssertRCReturn(rc, rc);
2958 rc = hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
2959 AssertRCReturn(rc, rc);
2960 return rc;
2961}
2962
2963
2964/**
2965 * Loads the guest CR0 control register into the guest-state area in the VMCS.
2966 * CR0 is partially shared with the host and we have to consider the FPU bits.
2967 *
2968 * @returns VBox status code.
2970 * @param pVCpu Pointer to the VMCPU.
2971 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2972 * out-of-sync. Make sure to update the required fields
2973 * before using them.
2974 *
2975 * @remarks No-long-jump zone!!!
2976 */
2977static int hmR0VmxLoadSharedCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2978{
2979 /*
2980 * Guest CR0.
2981 * Guest FPU.
2982 */
2983 int rc = VINF_SUCCESS;
2984 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
2985 {
2986 Assert(!(pMixedCtx->cr0 >> 32));
2987 uint32_t u32GuestCR0 = pMixedCtx->cr0;
2988 PVM pVM = pVCpu->CTX_SUFF(pVM);
2989
2990 /* The guest's view (read access) of its CR0 is unblemished. */
2991 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
2992 AssertRCReturn(rc, rc);
2993 Log4(("Load: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", u32GuestCR0));
2994
2995 /* Setup VT-x's view of the guest CR0. */
2996 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
2997 if (pVM->hm.s.fNestedPaging)
2998 {
2999 if (CPUMIsGuestPagingEnabledEx(pMixedCtx))
3000 {
3001 /* The guest has paging enabled, let it access CR3 without causing a VM exit if supported. */
3002 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3003 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
3004 }
3005 else
3006 {
3007                /* The guest doesn't have paging enabled, make CR3 accesses cause VM-exits so we can update our shadow. */
3008 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3009 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3010 }
3011
3012 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
3013 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3014 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3015
3016 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3017 AssertRCReturn(rc, rc);
3018 }
3019 else
3020 u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
3021
3022 /*
3023 * Guest FPU bits.
3024         * Intel spec. 23.8 "Restrictions on VMX operation" mentions that CR0.NE must always be set on the first
3025         * CPUs to support VT-x, and the VM-entry checks make no exception to this for unrestricted guests (UX).
3026 */
3027 u32GuestCR0 |= X86_CR0_NE;
3028 bool fInterceptNM = false;
3029 if (CPUMIsGuestFPUStateActive(pVCpu))
3030 {
3031 fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
3032        /* The guest should still get #NM exceptions when it expects them, so we should not clear the TS & MP bits here.
3033           We're only concerned about -us- not intercepting #NMs when the guest FPU state is active; not the guest itself! */
3034 }
3035 else
3036 {
3037 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
3038 u32GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
3039 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
3040 }
3041
3042 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
3043 bool fInterceptMF = false;
3044 if (!(pMixedCtx->cr0 & X86_CR0_NE))
3045 fInterceptMF = true;
3046
3047 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
3048 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3049 {
3050 Assert(PDMVmmDevHeapIsEnabled(pVM));
3051 Assert(pVM->hm.s.vmx.pRealModeTSS);
3052 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
3053 fInterceptNM = true;
3054 fInterceptMF = true;
3055 }
3056 else
3057 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
3058
3059 if (fInterceptNM)
3060 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
3061 else
3062 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
3063
3064 if (fInterceptMF)
3065 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
3066 else
3067 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
3068
3069 /* Additional intercepts for debugging, define these yourself explicitly. */
3070#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3071 pVCpu->hm.s.vmx.u32XcptBitmap |= 0
3072 | RT_BIT(X86_XCPT_BP)
3073 | RT_BIT(X86_XCPT_DB)
3074 | RT_BIT(X86_XCPT_DE)
3075 | RT_BIT(X86_XCPT_NM)
3076 | RT_BIT(X86_XCPT_UD)
3077 | RT_BIT(X86_XCPT_NP)
3078 | RT_BIT(X86_XCPT_SS)
3079 | RT_BIT(X86_XCPT_GP)
3080 | RT_BIT(X86_XCPT_PF)
3081 | RT_BIT(X86_XCPT_MF)
3082 ;
3083#elif defined(HMVMX_ALWAYS_TRAP_PF)
3084 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
3085#endif
3086
3087 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
3088
3089 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
3090 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.u64Cr0Fixed0 & pVM->hm.s.vmx.msr.u64Cr0Fixed1);
3091 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.u64Cr0Fixed0 | pVM->hm.s.vmx.msr.u64Cr0Fixed1);
3092 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
3093 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
3094 else
3095 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
3096
3097 u32GuestCR0 |= uSetCR0;
3098 u32GuestCR0 &= uZapCR0;
3099 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
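        /* Illustrative example (not part of the original source): a CR0 bit must
           be 1 wherever Fixed0=1 and must be 0 wherever Fixed1=0. With the typical
           values Fixed0=0x80000021 (PG|NE|PE) and Fixed1=0xffffffff this gives
               uSetCR0 = 0x80000021 & 0xffffffff = 0x80000021   (bits forced to 1)
               uZapCR0 = 0x80000021 | 0xffffffff = 0xffffffff   (no bits forced to 0)
           so the OR/AND above produce a CR0 value the CPU will accept. */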
3100
3101 /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
3102 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
3103 AssertRCReturn(rc, rc);
3104 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3105 AssertRCReturn(rc, rc);
3106 Log4(("Load: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", u32GuestCR0, uSetCR0, uZapCR0));
3107
3108 /*
3109 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
3110 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
3111 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
3112 */
3113 uint32_t u32CR0Mask = 0;
3114 u32CR0Mask = X86_CR0_PE
3115 | X86_CR0_NE
3116 | X86_CR0_WP
3117 | X86_CR0_PG
3118 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
3119 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
3120 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
3121 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3122 u32CR0Mask &= ~X86_CR0_PE;
3123 if (pVM->hm.s.fNestedPaging)
3124 u32CR0Mask &= ~X86_CR0_WP;
3125
3126        /* If the guest FPU state is active, we don't need to VM-exit on writes to the FPU-related bits in CR0. */
3127 if (fInterceptNM)
3128 u32CR0Mask |= (X86_CR0_TS | X86_CR0_MP);
3129 else
3130 u32CR0Mask &= ~(X86_CR0_TS | X86_CR0_MP);
3131
3132 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
3133 pVCpu->hm.s.vmx.u32CR0Mask = u32CR0Mask;
3134 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
3135 AssertRCReturn(rc, rc);
3136
3137 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
3138 }
3139 return rc;
3140}
3141
3142
3143/**
3144 * Loads the guest control registers (CR3, CR4) into the guest-state area
3145 * in the VMCS.
3146 *
3147 * @returns VBox status code.
3149 * @param pVCpu Pointer to the VMCPU.
3150 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3151 * out-of-sync. Make sure to update the required fields
3152 * before using them.
3153 *
3154 * @remarks No-long-jump zone!!!
3155 */
3156static int hmR0VmxLoadGuestCR3AndCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3157{
3158 int rc = VINF_SUCCESS;
3159 PVM pVM = pVCpu->CTX_SUFF(pVM);
3160
3161 /*
3162 * Guest CR2.
3163 * It's always loaded in the assembler code. Nothing to do here.
3164 */
3165
3166 /*
3167 * Guest CR3.
3168 */
3169 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
3170 {
3171 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
3172 if (pVM->hm.s.fNestedPaging)
3173 {
3174 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
3175
3176 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
3177 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
3178 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
3179 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
3180
3181 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
3182 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
3183 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
3184
3185 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
3186            AssertMsg(   ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3    /* Bits 5:3 (EPT page walk length - 1) must be 3. */
3187                      && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x3f) == 0,   /* Bits 11:6 MBZ. */
3188 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3189
3190 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
3191 AssertRCReturn(rc, rc);
3192 Log4(("Load: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
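            /* Illustrative example (not part of the original source): with
               VMX_EPT_MEMTYPE_WB (6) and the default 4-level walk (length - 1 = 3),
               a PML4 table at host-physical 0x12345000 gives
                   EPTP = 0x12345000 | 6 | (3 << 3) = 0x1234501e
               which satisfies the bits 5:3 == 3 and bits 11:6 == 0 checks above. */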
3193
3194 if ( pVM->hm.s.vmx.fUnrestrictedGuest
3195 || CPUMIsGuestPagingEnabledEx(pMixedCtx))
3196 {
3197 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
3198 if (CPUMIsGuestInPAEModeEx(pMixedCtx))
3199 {
3200 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); AssertRCReturn(rc, rc);
3201 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
3202 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
3203 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
3204 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
3205 }
3206
3207 /* The guest's view of its CR3 is unblemished with Nested Paging when the guest is using paging or we
3208 have Unrestricted Execution to handle the guest when it's not using paging. */
3209 GCPhysGuestCR3 = pMixedCtx->cr3;
3210 }
3211 else
3212 {
3213 /*
3214 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
3215 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
3216 * EPT takes care of translating it to host-physical addresses.
3217 */
3218 RTGCPHYS GCPhys;
3219 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
3220 Assert(PDMVmmDevHeapIsEnabled(pVM));
3221
3222 /* We obtain it here every time as the guest could have relocated this PCI region. */
3223 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
3224 AssertRCReturn(rc, rc);
3225
3226 GCPhysGuestCR3 = GCPhys;
3227 }
3228
3229 Log4(("Load: VMX_VMCS_GUEST_CR3=%#RGv (GstN)\n", GCPhysGuestCR3));
3230 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
3231 }
3232 else
3233 {
3234 /* Non-nested paging case, just use the hypervisor's CR3. */
3235 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
3236
3237 Log4(("Load: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", HCPhysGuestCR3));
3238 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
3239 }
3240 AssertRCReturn(rc, rc);
3241
3242 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
3243 }
3244
3245 /*
3246 * Guest CR4.
3247 */
3248 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
3249 {
3250 Assert(!(pMixedCtx->cr4 >> 32));
3251 uint32_t u32GuestCR4 = pMixedCtx->cr4;
3252
3253 /* The guest's view of its CR4 is unblemished. */
3254 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
3255 AssertRCReturn(rc, rc);
3256 Log4(("Load: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", u32GuestCR4));
3257
3258 /* Setup VT-x's view of the guest CR4. */
3259 /*
3260 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
3261 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0())
3262 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
3263 */
3264 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3265 {
3266 Assert(pVM->hm.s.vmx.pRealModeTSS);
3267 Assert(PDMVmmDevHeapIsEnabled(pVM));
3268 u32GuestCR4 &= ~X86_CR4_VME;
3269 }
3270
3271 if (pVM->hm.s.fNestedPaging)
3272 {
3273 if ( !CPUMIsGuestPagingEnabledEx(pMixedCtx)
3274 && !pVM->hm.s.vmx.fUnrestrictedGuest)
3275 {
3276 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
3277 u32GuestCR4 |= X86_CR4_PSE;
3278                /* Our identity mapping is a 32-bit page directory. */
3279 u32GuestCR4 &= ~X86_CR4_PAE;
3280 }
3281 /* else use guest CR4.*/
3282 }
3283 else
3284 {
3285 /*
3286             * The shadow paging modes and guest paging modes are different; the shadow paging mode follows the host
3287             * paging mode, and thus we need to adjust VT-x's view of CR4 depending on our shadow page tables.
3288 */
3289 switch (pVCpu->hm.s.enmShadowMode)
3290 {
3291 case PGMMODE_REAL: /* Real-mode. */
3292 case PGMMODE_PROTECTED: /* Protected mode without paging. */
3293 case PGMMODE_32_BIT: /* 32-bit paging. */
3294 {
3295 u32GuestCR4 &= ~X86_CR4_PAE;
3296 break;
3297 }
3298
3299 case PGMMODE_PAE: /* PAE paging. */
3300 case PGMMODE_PAE_NX: /* PAE paging with NX. */
3301 {
3302 u32GuestCR4 |= X86_CR4_PAE;
3303 break;
3304 }
3305
3306 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
3307 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
3308#ifdef VBOX_ENABLE_64_BITS_GUESTS
3309 break;
3310#endif
3311 default:
3312 AssertFailed();
3313 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
3314 }
3315 }
3316
3317 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
3318 uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.u64Cr4Fixed0 & pVM->hm.s.vmx.msr.u64Cr4Fixed1);
3319 uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.u64Cr4Fixed0 | pVM->hm.s.vmx.msr.u64Cr4Fixed1);
3320 u32GuestCR4 |= uSetCR4;
3321 u32GuestCR4 &= uZapCR4;
3322
3323 /* Write VT-x's view of the guest CR4 into the VMCS. */
3324 Log4(("Load: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", u32GuestCR4, uSetCR4, uZapCR4));
3325 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
3326 AssertRCReturn(rc, rc);
3327
3328        /* Setup the CR4 mask. CR4 flags owned by the host; if the guest attempts to change them, a VM-exit occurs. */
3329 uint32_t u32CR4Mask = 0;
3330 u32CR4Mask = X86_CR4_VME
3331 | X86_CR4_PAE
3332 | X86_CR4_PGE
3333 | X86_CR4_PSE
3334 | X86_CR4_VMXE;
3335 pVCpu->hm.s.vmx.u32CR4Mask = u32CR4Mask;
3336 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
3337 AssertRCReturn(rc, rc);
3338
3339 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
3340 }
3341 return rc;
3342}
3343
3344
3345/**
3346 * Loads the guest debug registers into the guest-state area in the VMCS.
3347 * This also sets up whether #DB and MOV DRx accesses cause VM exits.
3348 *
3349 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
3350 *
3351 * @returns VBox status code.
3352 * @param pVCpu Pointer to the VMCPU.
3353 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3354 * out-of-sync. Make sure to update the required fields
3355 * before using them.
3356 *
3357 * @remarks No-long-jump zone!!!
3358 */
3359static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3360{
3361 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
3362 return VINF_SUCCESS;
3363
3364#ifdef VBOX_STRICT
3365 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
3366 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
3367 {
3368 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
3369 Assert((pMixedCtx->dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0); /* Bits 63:32, 15, 14, 12, 11 are reserved. */
3370 Assert((pMixedCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); /* Bit 10 is reserved (RA1). */
3371 }
3372#endif
3373
3374 int rc;
3375 PVM pVM = pVCpu->CTX_SUFF(pVM);
3376 bool fInterceptDB = false;
3377 bool fInterceptMovDRx = false;
3378 if (pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu))
3379 {
3380 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
3381 if (pVM->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
3382 {
3383 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
3384 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3385 AssertRCReturn(rc, rc);
3386 Assert(fInterceptDB == false);
3387 }
3388 else
3389 {
3390 pMixedCtx->eflags.u32 |= X86_EFL_TF;
3391 pVCpu->hm.s.fClearTrapFlag = true;
3392 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RFLAGS;
3393 fInterceptDB = true;
3394 }
3395 }
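    /* Illustrative note (not part of the original source): the monitor trap flag
       makes the CPU VM-exit after every guest instruction without touching guest
       state, whereas the EFLAGS.TF fallback above is guest-visible -- hence
       fClearTrapFlag, so the flag can be taken back out again on VM-exit. */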
3396
3397 if (fInterceptDB || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
3398 {
3399 /*
3400 * Use the combined guest and host DRx values found in the hypervisor
3401 * register set because the debugger has breakpoints active or someone
3402 * is single stepping on the host side without a monitor trap flag.
3403 *
3404 * Note! DBGF expects a clean DR6 state before executing guest code.
3405 */
3406 if (!CPUMIsHyperDebugStateActive(pVCpu))
3407 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
3408 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
3409 Assert(CPUMIsHyperDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
3410
3411 /* Update DR7. (The other DRx values are handled by CPUM one way or the other.) */
3412 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)CPUMGetHyperDR7(pVCpu));
3413 AssertRCReturn(rc, rc);
3414
3415 fInterceptDB = true;
3416 fInterceptMovDRx = true;
3417 }
3418 else
3419 {
3420 /*
3421 * If the guest has enabled debug registers, we need to load them prior to
3422 * executing guest code so they'll trigger at the right time.
3423 */
3424 if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
3425 {
3426 if (!CPUMIsGuestDebugStateActive(pVCpu))
3427 {
3428 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
3429 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
3430 }
3431 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
3432 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
3433 }
3434 /*
3435 * If no debugging enabled, we'll lazy load DR0-3. Unlike on AMD-V, we
3436 * must intercept #DB in order to maintain a correct DR6 guest value.
3437 */
3438 else if (!CPUMIsGuestDebugStateActive(pVCpu))
3439 {
3440 fInterceptMovDRx = true;
3441 fInterceptDB = true;
3442 }
3443
3444 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
3445 AssertRCReturn(rc, rc);
3446 }
3447
3448 /*
3449 * Update the exception bitmap regarding intercepting #DB generated by the guest.
3450 */
3451 if (fInterceptDB)
3452 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
3453 else if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3454 {
3455#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3456 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
3457#endif
3458 }
3459 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3460 AssertRCReturn(rc, rc);
3461
3462 /*
3463 * Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions.
3464 */
3465 if (fInterceptMovDRx)
3466 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
3467 else
3468 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
3469 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3470 AssertRCReturn(rc, rc);
3471
3472 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
3473 return VINF_SUCCESS;
3474}
3475
3476
3477#ifdef VBOX_STRICT
3478/**
3479 * Strict function to validate segment registers.
3480 *
3481 * @remarks ASSUMES CR0 is up to date.
3482 */
3483static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3484{
3485 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3486    /* NOTE: The reason we check for attribute value 0 and not just the unusable bit here is that hmR0VmxWriteSegmentReg()
3487     * only updates the VMCS' copy of the value with the unusable bit and doesn't change the guest-context value. */
3488 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
3489 && ( !CPUMIsGuestInRealModeEx(pCtx)
3490 && !CPUMIsGuestInV86ModeEx(pCtx)))
3491 {
3492 /* Protected mode checks */
3493 /* CS */
3494 Assert(pCtx->cs.Attr.n.u1Present);
3495 Assert(!(pCtx->cs.Attr.u & 0xf00));
3496 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
3497 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
3498 || !(pCtx->cs.Attr.n.u1Granularity));
3499 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
3500 || (pCtx->cs.Attr.n.u1Granularity));
3501 /* CS cannot be loaded with NULL in protected mode. */
3502 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS?!? */
3503 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
3504 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
3505 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
3506 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
3507 else
3508            AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u4Type));
3509 /* SS */
3510 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3511 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
3512 if ( !(pCtx->cr0 & X86_CR0_PE)
3513 || pCtx->cs.Attr.n.u4Type == 3)
3514 {
3515 Assert(!pCtx->ss.Attr.n.u2Dpl);
3516 }
3517 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
3518 {
3519 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3520 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
3521 Assert(pCtx->ss.Attr.n.u1Present);
3522 Assert(!(pCtx->ss.Attr.u & 0xf00));
3523 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
3524 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
3525 || !(pCtx->ss.Attr.n.u1Granularity));
3526 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
3527 || (pCtx->ss.Attr.n.u1Granularity));
3528 }
3529 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
3530 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
3531 {
3532 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3533 Assert(pCtx->ds.Attr.n.u1Present);
3534 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
3535 Assert(!(pCtx->ds.Attr.u & 0xf00));
3536 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
3537 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
3538 || !(pCtx->ds.Attr.n.u1Granularity));
3539 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
3540 || (pCtx->ds.Attr.n.u1Granularity));
3541 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3542 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
3543 }
3544 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
3545 {
3546 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3547 Assert(pCtx->es.Attr.n.u1Present);
3548 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
3549 Assert(!(pCtx->es.Attr.u & 0xf00));
3550 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
3551 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
3552 || !(pCtx->es.Attr.n.u1Granularity));
3553 Assert( !(pCtx->es.u32Limit & 0xfff00000)
3554 || (pCtx->es.Attr.n.u1Granularity));
3555 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3556 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
3557 }
3558 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
3559 {
3560 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3561 Assert(pCtx->fs.Attr.n.u1Present);
3562 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
3563 Assert(!(pCtx->fs.Attr.u & 0xf00));
3564 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
3565 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
3566 || !(pCtx->fs.Attr.n.u1Granularity));
3567 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
3568 || (pCtx->fs.Attr.n.u1Granularity));
3569 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3570 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3571 }
3572 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
3573 {
3574 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3575 Assert(pCtx->gs.Attr.n.u1Present);
3576 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
3577 Assert(!(pCtx->gs.Attr.u & 0xf00));
3578 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
3579 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
3580 || !(pCtx->gs.Attr.n.u1Granularity));
3581 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
3582 || (pCtx->gs.Attr.n.u1Granularity));
3583 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3584 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3585 }
3586 /* 64-bit capable CPUs. */
3587# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3588 Assert(!(pCtx->cs.u64Base >> 32));
3589 Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
3590 Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
3591 Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
3592# endif
3593 }
3594 else if ( CPUMIsGuestInV86ModeEx(pCtx)
3595 || ( CPUMIsGuestInRealModeEx(pCtx)
3596 && !pVM->hm.s.vmx.fUnrestrictedGuest))
3597 {
3598 /* Real and v86 mode checks. */
3599        /* hmR0VmxWriteSegmentReg() writes the modified attributes into the VMCS; we want to check what we're actually feeding to VT-x. */
3600 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
3601 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3602 {
3603 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
3604 }
3605 else
3606 {
3607 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
3608 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
3609 }
3610
3611 /* CS */
3612 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#x %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
3613 Assert(pCtx->cs.u32Limit == 0xffff);
3614 Assert(u32CSAttr == 0xf3);
3615 /* SS */
3616 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
3617 Assert(pCtx->ss.u32Limit == 0xffff);
3618 Assert(u32SSAttr == 0xf3);
3619 /* DS */
3620 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
3621 Assert(pCtx->ds.u32Limit == 0xffff);
3622 Assert(u32DSAttr == 0xf3);
3623 /* ES */
3624 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
3625 Assert(pCtx->es.u32Limit == 0xffff);
3626 Assert(u32ESAttr == 0xf3);
3627 /* FS */
3628 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
3629 Assert(pCtx->fs.u32Limit == 0xffff);
3630 Assert(u32FSAttr == 0xf3);
3631 /* GS */
3632 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
3633 Assert(pCtx->gs.u32Limit == 0xffff);
3634 Assert(u32GSAttr == 0xf3);
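        /* Illustrative example (not part of the original source): in real and v86
           mode every segment base must equal selector << 4, e.g. CS.Sel=0x1234
           gives a CS base of 0x12340; attribute 0xf3 decodes as present (P=1),
           DPL=3, non-system (S=1), type 3 (accessed read/write data segment). */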
3635 /* 64-bit capable CPUs. */
3636# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3637 Assert(!(pCtx->cs.u64Base >> 32));
3638 Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
3639 Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
3640 Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
3641# endif
3642 }
3643}
3644#endif /* VBOX_STRICT */
3645
3646
3647/**
3648 * Writes a guest segment register into the guest-state area in the VMCS.
3649 *
3650 * @returns VBox status code.
3651 * @param pVCpu Pointer to the VMCPU.
3652 * @param idxSel Index of the selector in the VMCS.
3653 * @param idxLimit Index of the segment limit in the VMCS.
3654 * @param idxBase Index of the segment base in the VMCS.
3655 * @param idxAccess Index of the access rights of the segment in the VMCS.
3656 * @param pSelReg Pointer to the segment selector.
3657 * @param pCtx Pointer to the guest-CPU context.
3658 *
3659 * @remarks No-long-jump zone!!!
3660 */
3661static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
3662 uint32_t idxAccess, PCPUMSELREG pSelReg, PCPUMCTX pCtx)
3663{
3664 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
3665 AssertRCReturn(rc, rc);
3666 rc = VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
3667 AssertRCReturn(rc, rc);
3668 rc = VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
3669 AssertRCReturn(rc, rc);
3670
3671 uint32_t u32Access = pSelReg->Attr.u;
3672 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3673 {
3674            /* VT-x requires our real-on-v86 mode hack to override the segment access-right bits. */
3675 u32Access = 0xf3;
3676 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3677 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3678 }
3679 else
3680 {
3681 /*
3682         * The way to tell whether this is really a null selector or just a selector that was loaded with 0 in
3683         * real-mode is via the segment attributes. A selector loaded in real-mode with the value 0 is valid and usable in
3684         * protected-mode and we should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that
3685         * null selectors loaded in protected-mode have their attributes as 0.
3686 */
3687 if (!u32Access)
3688 u32Access = X86DESCATTR_UNUSABLE;
3689 }
3690
3691 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
3692 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
3693              ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg->Sel, pSelReg->Attr.u));
3694
3695 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
3696 AssertRCReturn(rc, rc);
3697 return rc;
3698}
3699
3700
3701/**
3702 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
3703 * into the guest-state area in the VMCS.
3704 *
3705 * @returns VBox status code.
3706 * @param   pVCpu       Pointer to the VMCPU.
3708 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3709 * out-of-sync. Make sure to update the required fields
3710 * before using them.
3711 *
3712 * @remarks ASSUMES CR0 is up to date (strict builds validation).
3713 * @remarks No-long-jump zone!!!
3714 */
3715static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3716{
3717 int rc = VERR_INTERNAL_ERROR_5;
3718 PVM pVM = pVCpu->CTX_SUFF(pVM);
3719
3720 /*
3721 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
3722 */
3723 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
3724 {
3725 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
3726 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3727 {
3728 pVCpu->hm.s.vmx.RealMode.AttrCS.u = pMixedCtx->cs.Attr.u;
3729 pVCpu->hm.s.vmx.RealMode.AttrSS.u = pMixedCtx->ss.Attr.u;
3730 pVCpu->hm.s.vmx.RealMode.AttrDS.u = pMixedCtx->ds.Attr.u;
3731 pVCpu->hm.s.vmx.RealMode.AttrES.u = pMixedCtx->es.Attr.u;
3732 pVCpu->hm.s.vmx.RealMode.AttrFS.u = pMixedCtx->fs.Attr.u;
3733 pVCpu->hm.s.vmx.RealMode.AttrGS.u = pMixedCtx->gs.Attr.u;
3734 }
3735
3736#ifdef VBOX_WITH_REM
3737 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
3738 {
3739 Assert(pVM->hm.s.vmx.pRealModeTSS);
3740 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
3741 if ( pVCpu->hm.s.vmx.fWasInRealMode
3742 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
3743 {
3744 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
3745 in real-mode (e.g. OpenBSD 4.0) */
3746 REMFlushTBs(pVM);
3747 Log4(("Load: Switch to protected mode detected!\n"));
3748 pVCpu->hm.s.vmx.fWasInRealMode = false;
3749 }
3750 }
3751#endif
3752 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
3753 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs, pMixedCtx);
3754 AssertRCReturn(rc, rc);
3755 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
3756 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss, pMixedCtx);
3757 AssertRCReturn(rc, rc);
3758 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
3759 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds, pMixedCtx);
3760 AssertRCReturn(rc, rc);
3761 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
3762 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es, pMixedCtx);
3763 AssertRCReturn(rc, rc);
3764 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
3765 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs, pMixedCtx);
3766 AssertRCReturn(rc, rc);
3767 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
3768 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs, pMixedCtx);
3769 AssertRCReturn(rc, rc);
3770
3771 Log4(("Load: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base,
3772 pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
3773#ifdef VBOX_STRICT
3774 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
3775#endif
3776 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
3777 }
3778
3779 /*
3780 * Guest TR.
3781 */
3782 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
3783 {
3784 /*
3785 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
3786 * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
3787 * See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
3788 */
3789 uint16_t u16Sel = 0;
3790 uint32_t u32Limit = 0;
3791 uint64_t u64Base = 0;
3792 uint32_t u32AccessRights = 0;
3793
3794 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3795 {
3796 u16Sel = pMixedCtx->tr.Sel;
3797 u32Limit = pMixedCtx->tr.u32Limit;
3798 u64Base = pMixedCtx->tr.u64Base;
3799 u32AccessRights = pMixedCtx->tr.Attr.u;
3800 }
3801 else
3802 {
3803 Assert(pVM->hm.s.vmx.pRealModeTSS);
3804 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
3805
3806 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
3807 RTGCPHYS GCPhys;
3808 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
3809 AssertRCReturn(rc, rc);
3810
3811 X86DESCATTR DescAttr;
3812 DescAttr.u = 0;
3813 DescAttr.n.u1Present = 1;
3814 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
3815
3816 u16Sel = 0;
3817 u32Limit = HM_VTX_TSS_SIZE;
3818 u64Base = GCPhys; /* in real-mode phys = virt. */
3819 u32AccessRights = DescAttr.u;
3820 }
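        /* Illustrative note (not part of the original source): DescAttr.u above
           decodes to 0x8b -- present (P=1), DPL=0, system descriptor (S=0),
           type 11 (busy 32-bit TSS) -- exactly what the "TSS is not busy!?"
           assertion below demands. */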
3821
3822 /* Validate. */
3823 Assert(!(u16Sel & RT_BIT(2)));
3824 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
3825 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
3826 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
3827 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
3828 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
3829 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
3830 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
3831 Assert( (u32Limit & 0xfff) == 0xfff
3832 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
3833 Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
3834 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
3835
3836 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel); AssertRCReturn(rc, rc);
3837 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRCReturn(rc, rc);
3838 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRCReturn(rc, rc);
3839 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRCReturn(rc, rc);
3840
3841 Log4(("Load: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
3842 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
3843 }
3844
3845 /*
3846 * Guest GDTR.
3847 */
3848 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
3849 {
3850 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt); AssertRCReturn(rc, rc);
3851 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt); AssertRCReturn(rc, rc);
3852
3853 Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
3854 Log4(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt));
3855 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
3856 }
3857
3858 /*
3859 * Guest LDTR.
3860 */
3861 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
3862 {
3863        /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
3864 uint32_t u32Access = 0;
3865 if (!pMixedCtx->ldtr.Attr.u)
3866 u32Access = X86DESCATTR_UNUSABLE;
3867 else
3868 u32Access = pMixedCtx->ldtr.Attr.u;
3869
3870 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel); AssertRCReturn(rc, rc);
3871 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit); AssertRCReturn(rc, rc);
3872 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base); AssertRCReturn(rc, rc);
3873 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRCReturn(rc, rc);
3874
3875 /* Validate. */
3876 if (!(u32Access & X86DESCATTR_UNUSABLE))
3877 {
3878 Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
3879 Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
3880 Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
3881 Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
3882 Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
3883 Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
3884 Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
3885 || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
3886 Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
3887 || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
3888 }
3889
3890 Log4(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base));
3891 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
3892 }
3893
3894 /*
3895 * Guest IDTR.
3896 */
3897 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
3898 {
3899 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt); AssertRCReturn(rc, rc);
3900 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt); AssertRCReturn(rc, rc);
3901
3902 Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
3903 Log4(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt));
3904 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
3905 }
3906
3907 return VINF_SUCCESS;
3908}
3909
3910
3911/**
3912 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
3913 * areas. These MSRs will automatically be loaded to the host CPU on every
3914 * successful VM entry and stored from the host CPU on every successful VM exit.
3915 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
3916 *
3917 * @returns VBox status code.
3918 * @param pVCpu Pointer to the VMCPU.
3919 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3920 * out-of-sync. Make sure to update the required fields
3921 * before using them.
3922 *
3923 * @remarks No-long-jump zone!!!
3924 */
3925static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3926{
3927 AssertPtr(pVCpu);
3928 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
3929
3930 /*
3931     * MSRs covered by Auto-load/store: LSTAR, STAR, SF_MASK, KERNEL_GS_BASE and TSC_AUX (RDTSCP).
3932 */
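    /* Illustrative note (not part of the original source): each entry in the
       area is a VMXAUTOMSR record, laid out per the Intel spec as
           { uint32_t u32Msr; uint32_t u32Reserved; uint64_t u64Value; }
       i.e. 16 bytes per MSR; the same table doubles as the VM-entry MSR-load
       area and the VM-exit MSR-store area, as the two counts written below show. */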
3933 int rc = VINF_SUCCESS;
3934 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
3935 {
3936#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
3937 PVM pVM = pVCpu->CTX_SUFF(pVM);
3938 PVMXAUTOMSR pGuestMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
3939 uint32_t cGuestMsrs = 0;
3940
3941 /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
3942 /** @todo r=ramshankar: Optimize this further to do lazy restoration and only
3943 * when the guest really is in 64-bit mode. */
3944 bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
3945 if (fSupportsLongMode)
3946 {
3947 pGuestMsr->u32Msr = MSR_K8_LSTAR;
3948 pGuestMsr->u32Reserved = 0;
3949 pGuestMsr->u64Value = pMixedCtx->msrLSTAR; /* 64 bits mode syscall rip */
3950 pGuestMsr++; cGuestMsrs++;
3951 pGuestMsr->u32Msr = MSR_K6_STAR;
3952 pGuestMsr->u32Reserved = 0;
3953 pGuestMsr->u64Value = pMixedCtx->msrSTAR; /* legacy syscall eip, cs & ss */
3954 pGuestMsr++; cGuestMsrs++;
3955 pGuestMsr->u32Msr = MSR_K8_SF_MASK;
3956 pGuestMsr->u32Reserved = 0;
3957 pGuestMsr->u64Value = pMixedCtx->msrSFMASK; /* syscall flag mask */
3958 pGuestMsr++; cGuestMsrs++;
3959 pGuestMsr->u32Msr = MSR_K8_KERNEL_GS_BASE;
3960 pGuestMsr->u32Reserved = 0;
3961 pGuestMsr->u64Value = pMixedCtx->msrKERNELGSBASE; /* swapgs exchange value */
3962 pGuestMsr++; cGuestMsrs++;
3963 }
3964
3965 /*
3966 * RDTSCP requires the TSC_AUX MSR. Host and guest share the physical MSR. So we have to
3967 * load the guest's copy if the guest can execute RDTSCP without causing VM-exits.
3968 */
3969 if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP)
3970 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP))
3971 {
3972 pGuestMsr->u32Msr = MSR_K8_TSC_AUX;
3973 pGuestMsr->u32Reserved = 0;
3974 rc = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pGuestMsr->u64Value);
3975 AssertRCReturn(rc, rc);
3976 pGuestMsr++; cGuestMsrs++;
3977 }
3978
3979        /* Shouldn't ever happen, but there -is- an architectural limit on the count. We're well within the recommended 512. */
3980 if (cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.u64Misc))
3981 {
3982 LogRel(("CPU autoload/store MSR count in VMCS exceeded cGuestMsrs=%u.\n", cGuestMsrs));
3983 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
3984 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3985 }
3986
3987 /* Update the VCPU's copy of the guest MSR count. */
3988 pVCpu->hm.s.vmx.cGuestMsrs = cGuestMsrs;
3989 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cGuestMsrs); AssertRCReturn(rc, rc);
3990 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs); AssertRCReturn(rc, rc);
3991#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
3992
3993 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_AUTO_MSRS;
3994 }
3995
3996 /*
3997 * Guest Sysenter MSRs.
3998 * These flags are only set when MSR-bitmaps are not supported by the CPU and we cause
3999 * VM-exits on WRMSRs for these MSRs.
4000 */
4001 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
4002 {
4003 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); AssertRCReturn(rc, rc);
4004 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_CS_MSR;
4005 }
4006 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
4007 {
4008 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); AssertRCReturn(rc, rc);
4009 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR;
4010 }
4011 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
4012 {
4013 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); AssertRCReturn(rc, rc);
4014 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR;
4015 }
4016
4017 return rc;
4018}
4019
4020
4021/**
4022 * Loads the guest activity state into the guest-state area in the VMCS.
4023 *
4024 * @returns VBox status code.
4025 * @param pVCpu Pointer to the VMCPU.
4026 * @param   pCtx        Pointer to the guest-CPU context. The data may be
4027 * out-of-sync. Make sure to update the required fields
4028 * before using them.
4029 *
4030 * @remarks No-long-jump zone!!!
4031 */
4032static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx)
4033{
4034 /** @todo See if we can make use of other states, e.g.
4035 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
4036 int rc = VINF_SUCCESS;
4037 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_ACTIVITY_STATE)
4038 {
4039 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
4040 AssertRCReturn(rc, rc);
4041 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_ACTIVITY_STATE;
4042 }
4043 return rc;
4044}
4045
4046
4047/**
4048 * Sets up the appropriate function to run guest code.
4049 *
4050 * @returns VBox status code.
4051 * @param pVCpu Pointer to the VMCPU.
4052 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4053 * out-of-sync. Make sure to update the required fields
4054 * before using them.
4055 *
4056 * @remarks No-long-jump zone!!!
4057 */
4058static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4059{
4060 if (CPUMIsGuestInLongModeEx(pMixedCtx))
4061 {
4062#ifndef VBOX_ENABLE_64_BITS_GUESTS
4063 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4064#endif
4065 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
4066#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4067 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
4068 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
4069#else
4070 /* 64-bit host or hybrid host. */
4071 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
4072#endif
4073 }
4074 else
4075 {
4076 /* Guest is not in long mode, use the 32-bit handler. */
4077 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4078 }
4079 Assert(pVCpu->hm.s.vmx.pfnStartVM);
4080 return VINF_SUCCESS;
4081}
4082
4083
4084/**
4085 * Wrapper for running the guest code in VT-x.
4086 *
4087 * @returns VBox strict status code.
4088 * @param pVM Pointer to the VM.
4089 * @param pVCpu Pointer to the VMCPU.
4090 * @param pCtx Pointer to the guest-CPU context.
4091 *
4092 * @remarks No-long-jump zone!!!
4093 */
4094DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4095{
4096 /*
4097 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
4098     * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
4099     * See the MSDN docs "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage" for details.
4100 */
4101 const bool fResumeVM = !!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
4102 /** @todo Add stats for resume vs launch. */
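    /* Illustrative note (not part of the original source): per the Intel spec the
       first VM-entry on a freshly loaded VMCS must use VMLAUNCH, and every later
       entry on the same VMCS must use VMRESUME; pfnStartVM picks the instruction
       based on fResumeVM. */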
4103#ifdef VBOX_WITH_KERNEL_USING_XMM
4104 return HMR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
4105#else
4106 return pVCpu->hm.s.vmx.pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
4107#endif
4108}
4109
4110
4111/**
4112 * Reports world-switch error and dumps some useful debug info.
4113 *
4114 * @param pVM Pointer to the VM.
4115 * @param pVCpu Pointer to the VMCPU.
4116 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
4117 * @param pCtx Pointer to the guest-CPU context.
4118 * @param pVmxTransient Pointer to the VMX transient structure (only
4119 * exitReason updated).
4120 */
4121static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
4122{
4123 Assert(pVM);
4124 Assert(pVCpu);
4125 Assert(pCtx);
4126 Assert(pVmxTransient);
4127 HMVMX_ASSERT_PREEMPT_SAFE();
4128
4129 Log4(("VM-entry failure: %Rrc\n", rcVMRun));
4130 switch (rcVMRun)
4131 {
4132 case VERR_VMX_INVALID_VMXON_PTR:
4133 AssertFailed();
4134 break;
4135 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
4136 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
4137 {
4138 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
4139 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
4140 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
4141 AssertRC(rc);
4142
4143 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
4144            /* LastError.idCurrentCpu was already updated in hmR0VmxPreRunGuestCommitted().
4145               We cannot do it here as we may have been preempted since then. */
4146
4147#ifdef VBOX_STRICT
4148 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
4149 pVmxTransient->uExitReason));
4150 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
4151 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
4152 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
4153 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
4154 else
4155 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
4156 Log4(("Entered host CPU %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));
4157 Log4(("Current host CPU %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));
4158
4159 /* VMX control bits. */
4160 uint32_t u32Val;
4161 uint64_t u64Val;
4162 HMVMXHCUINTREG uHCReg;
4163 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
4164 Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
4165 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
4166 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
4167 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
4168 Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
4169 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
4170 Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
4171 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
4172 Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
4173 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
4174 Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
4175 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
4176 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
4177 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
4178 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
4179 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
4180 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
4181 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
4182 Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
4183 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
4184 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
4185 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4186 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
4187 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4188 Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
4189 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
4190 Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
4191 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
4192 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
4193 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
4194 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
4195 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
4196 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
4197 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
4198            Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
4199 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
4200 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
4201 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
4202 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
4203 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
4204 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
4205
4206 /* Guest bits. */
4207 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
4208 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
4209 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
4210 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
4211 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
4212 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
4213 rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val); AssertRC(rc);
4214 Log4(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
4215
4216 /* Host bits. */
4217 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
4218 Log4(("Host CR0 %#RHr\n", uHCReg));
4219 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
4220 Log4(("Host CR3 %#RHr\n", uHCReg));
4221 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
4222 Log4(("Host CR4 %#RHr\n", uHCReg));
4223
4224 RTGDTR HostGdtr;
4225 PCX86DESCHC pDesc;
4226 ASMGetGDTR(&HostGdtr);
4227 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_CS, &u32Val); AssertRC(rc);
4228 Log4(("Host CS %#08x\n", u32Val));
4229 if (u32Val < HostGdtr.cbGdt)
4230 {
4231 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4232 HMR0DumpDescriptor(pDesc, u32Val, "CS: ");
4233 }
4234
4235 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val); AssertRC(rc);
4236 Log4(("Host DS %#08x\n", u32Val));
4237 if (u32Val < HostGdtr.cbGdt)
4238 {
4239 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4240 HMR0DumpDescriptor(pDesc, u32Val, "DS: ");
4241 }
4242
4243 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val); AssertRC(rc);
4244 Log4(("Host ES %#08x\n", u32Val));
4245 if (u32Val < HostGdtr.cbGdt)
4246 {
4247 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4248 HMR0DumpDescriptor(pDesc, u32Val, "ES: ");
4249 }
4250
4251 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val); AssertRC(rc);
4252 Log4(("Host FS %#08x\n", u32Val));
4253 if (u32Val < HostGdtr.cbGdt)
4254 {
4255 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4256 HMR0DumpDescriptor(pDesc, u32Val, "FS: ");
4257 }
4258
4259 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val); AssertRC(rc);
4260 Log4(("Host GS %#08x\n", u32Val));
4261 if (u32Val < HostGdtr.cbGdt)
4262 {
4263 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4264 HMR0DumpDescriptor(pDesc, u32Val, "GS: ");
4265 }
4266
4267 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val); AssertRC(rc);
4268 Log4(("Host SS %#08x\n", u32Val));
4269 if (u32Val < HostGdtr.cbGdt)
4270 {
4271 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4272 HMR0DumpDescriptor(pDesc, u32Val, "SS: ");
4273 }
4274
4275 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val); AssertRC(rc);
4276 Log4(("Host TR %#08x\n", u32Val));
4277 if (u32Val < HostGdtr.cbGdt)
4278 {
4279 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4280 HMR0DumpDescriptor(pDesc, u32Val, "TR: ");
4281 }
4282
4283 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
4284 Log4(("Host TR Base %#RHv\n", uHCReg));
4285 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
4286 Log4(("Host GDTR Base %#RHv\n", uHCReg));
4287 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
4288 Log4(("Host IDTR Base %#RHv\n", uHCReg));
4289 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
4290 Log4(("Host SYSENTER CS %#08x\n", u32Val));
4291 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
4292 Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
4293 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
4294 Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
4295 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
4296 Log4(("Host RSP %#RHv\n", uHCReg));
4297 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
4298 Log4(("Host RIP %#RHv\n", uHCReg));
4299# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4300 if (HMVMX_IS_64BIT_HOST_MODE())
4301 {
4302 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
4303 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
4304 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
4305 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
4306 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
4307 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
4308 }
4309# endif
4310#endif /* VBOX_STRICT */
4311 break;
4312 }
4313
4314 default:
4315 /* Impossible */
4316 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
4317 break;
4318 }
4319 NOREF(pVM);
4320}
4321
4322
4323#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4324#ifndef VMX_USE_CACHED_VMCS_ACCESSES
4325# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
4326#endif
4327#ifdef VBOX_STRICT
4328static bool hmR0VmxIsValidWriteField(uint32_t idxField)
4329{
4330 switch (idxField)
4331 {
4332 case VMX_VMCS_GUEST_RIP:
4333 case VMX_VMCS_GUEST_RSP:
4334 case VMX_VMCS_GUEST_SYSENTER_EIP:
4335 case VMX_VMCS_GUEST_SYSENTER_ESP:
4336 case VMX_VMCS_GUEST_GDTR_BASE:
4337 case VMX_VMCS_GUEST_IDTR_BASE:
4338 case VMX_VMCS_GUEST_CS_BASE:
4339 case VMX_VMCS_GUEST_DS_BASE:
4340 case VMX_VMCS_GUEST_ES_BASE:
4341 case VMX_VMCS_GUEST_FS_BASE:
4342 case VMX_VMCS_GUEST_GS_BASE:
4343 case VMX_VMCS_GUEST_SS_BASE:
4344 case VMX_VMCS_GUEST_LDTR_BASE:
4345 case VMX_VMCS_GUEST_TR_BASE:
4346 case VMX_VMCS_GUEST_CR3:
4347 return true;
4348 }
4349 return false;
4350}
4351
4352static bool hmR0VmxIsValidReadField(uint32_t idxField)
4353{
4354 switch (idxField)
4355 {
4356 /* Read-only fields. */
4357 case VMX_VMCS_RO_EXIT_QUALIFICATION:
4358 return true;
4359 }
4360 /* Remaining readable fields should also be writable. */
4361 return hmR0VmxIsValidWriteField(idxField);
4362}
4363#endif /* VBOX_STRICT */
4364
4365
4366/**
4367 * Executes the specified handler in 64-bit mode.
4368 *
4369 * @returns VBox status code.
4370 * @param pVM Pointer to the VM.
4371 * @param pVCpu Pointer to the VMCPU.
4372 * @param pCtx Pointer to the guest CPU context.
4373 * @param enmOp The operation to perform.
4374 * @param   cbParam     Number of 32-bit parameters in @a paParam.
4375 * @param paParam Array of 32-bit parameters.
4376 */
4377VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
4378 uint32_t *paParam)
4379{
4380 int rc, rc2;
4381 PHMGLOBALCPUINFO pCpu;
4382 RTHCPHYS HCPhysCpuPage;
4383 RTCCUINTREG uOldEflags;
4384
4385 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
4386 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
4387 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
4388 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
4389
4390#ifdef VBOX_STRICT
4391 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
4392 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
4393
4394    for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
4395 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
4396#endif
4397
4398 /* Disable interrupts. */
4399 uOldEflags = ASMIntDisableFlags();
4400
4401#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
4402 RTCPUID idHostCpu = RTMpCpuId();
4403 CPUMR0SetLApic(pVCpu, idHostCpu);
4404#endif
4405
4406 pCpu = HMR0GetCurrentCpu();
4407 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4408
4409    /* Clear the VMCS. This marks it inactive, clears implementation-specific data and writes VMCS data back to memory. */
4410 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
4411
4412 /* Leave VMX Root Mode. */
4413 VMXDisable();
4414
4415 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4416
4417 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
4418 CPUMSetHyperEIP(pVCpu, enmOp);
4419 for (int i = (int)cbParam - 1; i >= 0; i--)
4420 CPUMPushHyper(pVCpu, paParam[i]);
4421
4422 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
4423
4424 /* Call the switcher. */
4425 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
4426 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
4427
4428 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
4429 /* Make sure the VMX instructions don't cause #UD faults. */
4430 ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
4431
4432    /* Re-enter VMX Root Mode. */
4433 rc2 = VMXEnable(HCPhysCpuPage);
4434 if (RT_FAILURE(rc2))
4435 {
4436 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4437 ASMSetFlags(uOldEflags);
4438 return rc2;
4439 }
4440
4441 rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
4442 AssertRC(rc2);
4443 Assert(!(ASMGetFlags() & X86_EFL_IF));
4444 ASMSetFlags(uOldEflags);
4445 return rc;
4446}
4447
4448
4449/**
4450 * Prepares for and executes VMLAUNCH (64 bits guests) for 32-bit hosts
4451 * supporting 64-bit guests.
4452 *
4453 * @returns VBox status code.
4454 * @param fResume Whether to VMLAUNCH or VMRESUME.
4455 * @param pCtx Pointer to the guest-CPU context.
4456 * @param pCache Pointer to the VMCS cache.
4457 * @param pVM Pointer to the VM.
4458 * @param pVCpu Pointer to the VMCPU.
4459 */
4460DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
4461{
4462 uint32_t aParam[6];
4463 PHMGLOBALCPUINFO pCpu = NULL;
4464 RTHCPHYS HCPhysCpuPage = 0;
4465 int rc = VERR_INTERNAL_ERROR_5;
4466
4467 pCpu = HMR0GetCurrentCpu();
4468 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4469
4470#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4471 pCache->uPos = 1;
4472 pCache->interPD = PGMGetInterPaeCR3(pVM);
4473 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
4474#endif
4475
4476#ifdef VBOX_STRICT
4477 pCache->TestIn.HCPhysCpuPage = 0;
4478 pCache->TestIn.HCPhysVmcs = 0;
4479 pCache->TestIn.pCache = 0;
4480 pCache->TestOut.HCPhysVmcs = 0;
4481 pCache->TestOut.pCache = 0;
4482 pCache->TestOut.pCtx = 0;
4483 pCache->TestOut.eflags = 0;
4484#endif
4485
4486 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
4487 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
4488 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
4489 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
4490 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
4491 aParam[5] = 0;
4492
4493#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4494 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
4495 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
4496#endif
4497 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, 6, &aParam[0]);
4498
4499#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4500 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
4501 Assert(pCtx->dr[4] == 10);
4502 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
4503#endif
4504
4505#ifdef VBOX_STRICT
4506 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
4507 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4508 pVCpu->hm.s.vmx.HCPhysVmcs));
4509 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4510 pCache->TestOut.HCPhysVmcs));
4511 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
4512 pCache->TestOut.pCache));
4513 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
4514 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
4515 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
4516 pCache->TestOut.pCtx));
4517 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
4518#endif
4519 return rc;
4520}
4521
4522
4523/**
4524 * Initializes the VMCS read-cache. The VMCS cache is used for 32-bit hosts
4525 * running 64-bit guests (except 32-bit Darwin which runs with 64-bit paging in
4526 * 32-bit mode) for 64-bit fields that cannot be accessed in 32-bit mode. Some
4527 * 64-bit fields -can- be accessed (those that have a 32-bit FULL & HIGH part).
4528 *
4529 * @returns VBox status code.
4530 * @param pVM Pointer to the VM.
4531 * @param pVCpu Pointer to the VMCPU.
4532 */
4533static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
4534{
4535#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
4536{ \
4537 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
4538 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
4539 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
4540 ++cReadFields; \
4541}
4542
4543 AssertPtr(pVM);
4544 AssertPtr(pVCpu);
4545 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4546 uint32_t cReadFields = 0;
4547
4548 /*
4549 * Don't remove the #if 0'd fields in this code. They're listed here for consistency
4550 * and serve to indicate exceptions to the rules.
4551 */
4552
4553 /* Guest-natural selector base fields. */
4554#if 0
4555 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
4556 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
4557 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
4558#endif
4559 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
4560 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
4561 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
4562 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
4563 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
4564 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
4565 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
4566 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
4567 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
4568 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
4569 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
4570 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
4571#if 0
4572 /* Unused natural width guest-state fields. */
4573 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
4574 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
4575#endif
4576 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
4577 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
4578
4579 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
4580#if 0
4581 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
4582 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
4583 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
4584 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
4585 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
4586 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
4587 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
4588 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
4589 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
4590#endif
4591
4592 /* Natural width guest-state fields. */
4593 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
4594#if 0
4595 /* Currently unused field. */
4596 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
4597#endif
4598
4599 if (pVM->hm.s.fNestedPaging)
4600 {
4601 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
4602 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
4603 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
4604 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
4605 }
4606 else
4607 {
4608 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
4609 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
4610 }
4611
4612#undef VMXLOCAL_INIT_READ_CACHE_FIELD
4613 return VINF_SUCCESS;
4614}
4615
4616
4617/**
4618 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
4619 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
4620 * darwin, running 64-bit guests).
4621 *
4622 * @returns VBox status code.
4623 * @param pVCpu Pointer to the VMCPU.
4624 * @param idxField The VMCS field encoding.
4625 * @param   idxField        The VMCS field encoding.
 * @param   u64Val          A 16-, 32- or 64-bit value.
4626 */
4627VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4628{
4629 int rc;
4630 switch (idxField)
4631 {
4632 /*
4633         * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
4634 */
4635 /* 64-bit Control fields. */
4636 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
4637 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
4638 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
4639 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
4640 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
4641 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
4642 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
4643 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
4644 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
4645 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
4646 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
4647 case VMX_VMCS64_CTRL_EPTP_FULL:
4648 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
4649 /* 64-bit Guest-state fields. */
4650 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
4651 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
4652 case VMX_VMCS64_GUEST_PAT_FULL:
4653 case VMX_VMCS64_GUEST_EFER_FULL:
4654 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
4655 case VMX_VMCS64_GUEST_PDPTE0_FULL:
4656 case VMX_VMCS64_GUEST_PDPTE1_FULL:
4657 case VMX_VMCS64_GUEST_PDPTE2_FULL:
4658 case VMX_VMCS64_GUEST_PDPTE3_FULL:
4659 /* 64-bit Host-state fields. */
4660 case VMX_VMCS64_HOST_FIELD_PAT_FULL:
4661 case VMX_VMCS64_HOST_FIELD_EFER_FULL:
4662 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
4663 {
4664 rc = VMXWriteVmcs32(idxField, u64Val);
4665 rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32));
4666 break;
4667 }
4668
4669 /*
4670 * These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
4671         * values). When we switch the host to 64-bit mode for running 64-bit guests, these VMWRITEs are executed at that point.
4672 */
4673 /* Natural-width Guest-state fields. */
4674 case VMX_VMCS_GUEST_CR3:
4675 case VMX_VMCS_GUEST_ES_BASE:
4676 case VMX_VMCS_GUEST_CS_BASE:
4677 case VMX_VMCS_GUEST_SS_BASE:
4678 case VMX_VMCS_GUEST_DS_BASE:
4679 case VMX_VMCS_GUEST_FS_BASE:
4680 case VMX_VMCS_GUEST_GS_BASE:
4681 case VMX_VMCS_GUEST_LDTR_BASE:
4682 case VMX_VMCS_GUEST_TR_BASE:
4683 case VMX_VMCS_GUEST_GDTR_BASE:
4684 case VMX_VMCS_GUEST_IDTR_BASE:
4685 case VMX_VMCS_GUEST_RSP:
4686 case VMX_VMCS_GUEST_RIP:
4687 case VMX_VMCS_GUEST_SYSENTER_ESP:
4688 case VMX_VMCS_GUEST_SYSENTER_EIP:
4689 {
4690 if (!(u64Val >> 32))
4691 {
4692 /* If this field is 64-bit, VT-x will zero out the top bits. */
4693 rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
4694 }
4695 else
4696 {
4697                /* Assert that only the 32->64 switcher case ever comes here. */
4698 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
4699 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
4700 }
4701 break;
4702 }
4703
4704 default:
4705 {
4706 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
4707 rc = VERR_INVALID_PARAMETER;
4708 break;
4709 }
4710 }
4711 AssertRCReturn(rc, rc);
4712 return rc;
4713}
4714
4715
4716/**
4717 * Queues up a VMWRITE by using the VMCS write cache. This is only used on 32-bit
4718 * hosts (except Darwin) for 64-bit guests.
4719 *
 * @returns VBox status code.
4720 * @param pVCpu Pointer to the VMCPU.
4721 * @param idxField The VMCS field encoding.
4722 * @param   u64Val          A 16-, 32- or 64-bit value.
4723 */
4724VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4725{
4726 AssertPtr(pVCpu);
4727 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4728
4729 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
4730 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
4731
4732 /* Make sure there are no duplicates. */
4733 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
4734 {
4735 if (pCache->Write.aField[i] == idxField)
4736 {
4737 pCache->Write.aFieldVal[i] = u64Val;
4738 return VINF_SUCCESS;
4739 }
4740 }
4741
4742 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
4743 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
4744 pCache->Write.cValidEntries++;
4745 return VINF_SUCCESS;
4746}
4747
4748/* Enable later when the assembly code uses these as callbacks. */
4749#if 0
4750/*
4751 * Loads the VMCS write-cache into the CPU (by executing VMWRITEs).
4752 *
4753 * @param pVCpu Pointer to the VMCPU.
4754 * @param pCache Pointer to the VMCS cache.
4755 *
4756 * @remarks No-long-jump zone!!!
4757 */
4758VMMR0DECL(void) VMXWriteCachedVmcsLoad(PVMCPU pVCpu, PVMCSCACHE pCache)
4759{
4760 AssertPtr(pCache);
4761 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
4762 {
4763 int rc = VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
4764 AssertRC(rc);
4765 }
4766 pCache->Write.cValidEntries = 0;
4767}
4768
4769
4770/**
4771 * Populates the VMCS read-cache from the CPU (by executing VMREADs).
4772 *
4773 * @param pVCpu Pointer to the VMCPU.
4774 * @param pCache Pointer to the VMCS cache.
4775 *
4776 * @remarks No-long-jump zone!!!
4777 */
4778VMMR0DECL(void) VMXReadCachedVmcsStore(PVMCPU pVCpu, PVMCSCACHE pCache)
4779{
4780 AssertPtr(pCache);
4781 for (uint32_t i = 0; i < pCache->Read.cValidEntries; i++)
4782 {
4783 int rc = VMXReadVmcs64(pCache->Read.aField[i], &pCache->Read.aFieldVal[i]);
4784 AssertRC(rc);
4785 }
4786}
4787#endif
4788#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
4789
4790
4791/**
4792 * Sets up the usage of TSC-offsetting and updates the VMCS. If offsetting is
4793 * not possible, causes VM-exits on RDTSC(P)s. Also sets up the VMX preemption
4794 * timer.
4795 *
4796 * @returns VBox status code.
4797 * @param pVCpu Pointer to the VMCPU.
4798 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4799 * out-of-sync. Make sure to update the required fields
4800 * before using them.
4801 * @remarks No-long-jump zone!!!
4802 */
4803static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4804{
4805 int rc = VERR_INTERNAL_ERROR_5;
4806 bool fOffsettedTsc = false;
4807 PVM pVM = pVCpu->CTX_SUFF(pVM);
4808 if (pVM->hm.s.vmx.fUsePreemptTimer)
4809 {
4810 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset);
4811
4812 /* Make sure the returned values have sane upper and lower boundaries. */
4813 uint64_t u64CpuHz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
4814 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
4815 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
4816 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
4817
4818 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
4819 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);
4820 }
4821 else
4822 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset);
4823
4824 if (fOffsettedTsc)
4825 {
4826 uint64_t u64CurTSC = ASMReadTSC();
4827 if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
4828 {
4829 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
4830 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
4831
4832 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4833 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4834 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
4835 }
4836 else
4837 {
4838 /* VM-exit on RDTSC(P) as we would otherwise pass decreasing TSC values to the guest. */
4839 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4840 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4841 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
4842 }
4843 }
4844 else
4845 {
4846 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
4847 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4848 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4849 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
4850 }
4851}
4852
4853
4854/**
4855 * Determines if an exception is a contributory exception. Contributory
4856 * exceptions are ones which can cause double-faults. Page-fault is
4857 * intentionally not included here as it's a conditional contributory exception.
4858 *
4859 * @returns true if the exception is contributory, false otherwise.
4860 * @param uVector The exception vector.
4861 */
4862DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
4863{
4864 switch (uVector)
4865 {
4866 case X86_XCPT_GP:
4867 case X86_XCPT_SS:
4868 case X86_XCPT_NP:
4869 case X86_XCPT_TS:
4870 case X86_XCPT_DE:
4871 return true;
4872 default:
4873 break;
4874 }
4875 return false;
4876}
4877
4878
4879/**
4880 * Sets an event as a pending event to be injected into the guest.
4881 *
4882 * @param pVCpu Pointer to the VMCPU.
4883 * @param u32IntrInfo The VM-entry interruption-information field.
4884 * @param cbInstr The VM-entry instruction length in bytes (for software
4885 * interrupts, exceptions and privileged software
4886 * exceptions).
4887 * @param u32ErrCode The VM-entry exception error code.
4888 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
4889 * page-fault.
4890 *
4891 * @remarks Statistics counter assumes this is a guest event being injected or
4892 * re-injected into the guest, i.e. 'StatInjectPendingReflect' is
4893 * always incremented.
4894 */
4895DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntrInfo, uint32_t cbInstr, uint32_t u32ErrCode,
4896 RTGCUINTPTR GCPtrFaultAddress)
4897{
4898 Assert(!pVCpu->hm.s.Event.fPending);
4899 pVCpu->hm.s.Event.fPending = true;
4900 pVCpu->hm.s.Event.u64IntrInfo = u32IntrInfo;
4901 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
4902 pVCpu->hm.s.Event.cbInstr = cbInstr;
4903 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
4904
4905 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
4906}
4907
4908
4909/**
4910 * Sets a double-fault (#DF) exception as pending-for-injection into the VM.
4911 *
4912 * @param pVCpu Pointer to the VMCPU.
4913 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4914 * out-of-sync. Make sure to update the required fields
4915 * before using them.
4916 */
4917DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4918{
4919 uint32_t u32IntrInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
4920 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
4921 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
4922 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
4923}
4924
4925
4926/**
4927 * Handles a condition that occurred while delivering an event through the guest
4928 * IDT.
4929 *
4930 * @returns VBox status code (informational error codes included).
4931 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
4932 * @retval VINF_HM_DOUBLE_FAULT if a #DF condition was detected and we ought to
4933 *         continue execution of the guest, which will deliver the #DF.
4934 * @retval VINF_EM_RESET if we detected a triple-fault condition.
4935 *
4936 * @param pVCpu Pointer to the VMCPU.
4937 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4938 * out-of-sync. Make sure to update the required fields
4939 * before using them.
4940 * @param pVmxTransient Pointer to the VMX transient structure.
4941 *
4942 * @remarks No-long-jump zone!!!
4943 */
4944static int hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
4945{
4946 int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
4947 AssertRC(rc);
4948 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
4949 {
4950 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
4951 AssertRCReturn(rc, rc);
4952
4953 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
4954 uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntrInfo);
4955 uint32_t uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
4956
4957 typedef enum
4958 {
4959 VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
4960 VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
4961 VMXREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
4962 VMXREFLECTXCPT_NONE /* Nothing to reflect. */
4963 } VMXREFLECTXCPT;
4964
4965 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
4966 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
4967 if (VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntrInfo))
4968 {
4969 if (uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
4970 {
4971 enmReflect = VMXREFLECTXCPT_XCPT;
4972#ifdef VBOX_STRICT
4973 if ( hmR0VmxIsContributoryXcpt(uIdtVector)
4974 && uExitVector == X86_XCPT_PF)
4975 {
4976 Log4(("IDT: vcpu[%RU32] Contributory #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
4977 }
4978#endif
4979 if ( uExitVector == X86_XCPT_PF
4980 && uIdtVector == X86_XCPT_PF)
4981 {
4982 pVmxTransient->fVectoringPF = true;
4983 Log4(("IDT: vcpu[%RU32] Vectoring #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
4984 }
4985 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
4986 && hmR0VmxIsContributoryXcpt(uExitVector)
4987 && ( hmR0VmxIsContributoryXcpt(uIdtVector)
4988 || uIdtVector == X86_XCPT_PF))
4989 {
4990 enmReflect = VMXREFLECTXCPT_DF;
4991 }
4992 else if (uIdtVector == X86_XCPT_DF)
4993 enmReflect = VMXREFLECTXCPT_TF;
4994 }
4995 else if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
4996 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
4997 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
4998 {
4999 /*
5000             * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and privileged software exceptions
5001 * (whatever they are) as they reoccur when restarting the instruction.
5002 */
5003 enmReflect = VMXREFLECTXCPT_XCPT;
5004 }
5005 }
5006 else
5007 {
5008 /*
5009 * If event delivery caused an EPT violation/misconfig or APIC access VM-exit, then the VM-exit
5010 * interruption-information will not be valid and we end up here. In such cases, it is sufficient to reflect the
5011 * original exception to the guest after handling the VM-exit.
5012 */
5013 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5014 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
5015 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
5016 {
5017 enmReflect = VMXREFLECTXCPT_XCPT;
5018 }
5019 }
5020
5021 switch (enmReflect)
5022 {
5023 case VMXREFLECTXCPT_XCPT:
5024 {
5025 Assert( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5026 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5027 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
5028
5029 uint32_t u32ErrCode = 0;
5030 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo))
5031 {
5032 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
5033 AssertRCReturn(rc, rc);
5034 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5035 }
5036
5037 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
5038 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INTR_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
5039 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
5040 rc = VINF_SUCCESS;
5041 Log4(("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu,
5042 pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.u32ErrCode));
5043
5044 break;
5045 }
5046
5047 case VMXREFLECTXCPT_DF:
5048 {
5049 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
5050 rc = VINF_HM_DOUBLE_FAULT;
5051 Log4(("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,
5052 pVCpu->hm.s.Event.u64IntrInfo, uIdtVector, uExitVector));
5053
5054 break;
5055 }
5056
5057 case VMXREFLECTXCPT_TF:
5058 {
5059 rc = VINF_EM_RESET;
5060 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector,
5061 uExitVector));
5062 break;
5063 }
5064
5065 default:
5066 Assert(rc == VINF_SUCCESS);
5067 break;
5068 }
5069 }
5070 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET);
5071 return rc;
5072}
5073
5074
5075/**
5076 * Saves the guest's CR0 register from the VMCS into the guest-CPU context.
5077 *
5078 * @returns VBox status code.
5079 * @param pVCpu Pointer to the VMCPU.
5080 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5081 * out-of-sync. Make sure to update the required fields
5082 * before using them.
5083 *
5084 * @remarks No-long-jump zone!!!
5085 */
5086static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5087{
5088 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0))
5089 {
5090 uint32_t uVal = 0;
5091 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
5092 AssertRCReturn(rc, rc);
5093 uint32_t uShadow = 0;
5094 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
5095 AssertRCReturn(rc, rc);
5096
5097 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR0Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR0Mask);
5098 CPUMSetGuestCR0(pVCpu, uVal);
5099 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR0;
5100 }
5101 return VINF_SUCCESS;
5102}
5103
5104
5105/**
5106 * Saves the guest's CR4 register from the VMCS into the guest-CPU context.
5107 *
5108 * @returns VBox status code.
5109 * @param pVCpu Pointer to the VMCPU.
5110 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5111 * out-of-sync. Make sure to update the required fields
5112 * before using them.
5113 *
5114 * @remarks No-long-jump zone!!!
5115 */
5116static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5117{
5118 int rc = VINF_SUCCESS;
5119 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4))
5120 {
5121 uint32_t uVal = 0;
5122 uint32_t uShadow = 0;
5123 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
5124 AssertRCReturn(rc, rc);
5125 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
5126 AssertRCReturn(rc, rc);
5127
5128 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR4Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR4Mask);
5129 CPUMSetGuestCR4(pVCpu, uVal);
5130 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR4;
5131 }
5132 return rc;
5133}
5134
5135
5136/**
5137 * Saves the guest's RIP register from the VMCS into the guest-CPU context.
5138 *
5139 * @returns VBox status code.
5140 * @param pVCpu Pointer to the VMCPU.
5141 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5142 * out-of-sync. Make sure to update the required fields
5143 * before using them.
5144 *
5145 * @remarks No-long-jump zone!!!
5146 */
5147static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5148{
5149 int rc = VINF_SUCCESS;
5150 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP))
5151 {
5152 uint64_t u64Val = 0;
5153 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
5154 AssertRCReturn(rc, rc);
5155
5156 pMixedCtx->rip = u64Val;
5157 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RIP;
5158 }
5159 return rc;
5160}
5161
5162
5163/**
5164 * Saves the guest's RSP register from the VMCS into the guest-CPU context.
5165 *
5166 * @returns VBox status code.
5167 * @param pVCpu Pointer to the VMCPU.
5168 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5169 * out-of-sync. Make sure to update the required fields
5170 * before using them.
5171 *
5172 * @remarks No-long-jump zone!!!
5173 */
5174static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5175{
5176 int rc = VINF_SUCCESS;
5177 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RSP))
5178 {
5179 uint64_t u64Val = 0;
5180 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
5181 AssertRCReturn(rc, rc);
5182
5183 pMixedCtx->rsp = u64Val;
5184 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RSP;
5185 }
5186 return rc;
5187}
5188
5189
5190/**
5191 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
5192 *
5193 * @returns VBox status code.
5194 * @param pVCpu Pointer to the VMCPU.
5195 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5196 * out-of-sync. Make sure to update the required fields
5197 * before using them.
5198 *
5199 * @remarks No-long-jump zone!!!
5200 */
5201static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5202{
5203 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS))
5204 {
5205 uint32_t uVal = 0;
5206 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
5207 AssertRCReturn(rc, rc);
5208
5209 pMixedCtx->eflags.u32 = uVal;
5210 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) /* Undo our real-on-v86-mode changes to eflags if necessary. */
5211 {
5212 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
5213 Log4(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32));
5214
5215 pMixedCtx->eflags.Bits.u1VM = 0;
5216 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
5217 }
5218
5219 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RFLAGS;
5220 }
5221 return VINF_SUCCESS;
5222}
5223
5224
5225/**
5226 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
5227 * guest-CPU context.
5228 */
5229DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5230{
5231 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
5232 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
5233 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
5234 return rc;
5235}
5236
5237
5238/**
5239 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
5240 * from the guest-state area in the VMCS.
5241 *
5242 * @param pVCpu Pointer to the VMCPU.
5243 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5244 * out-of-sync. Make sure to update the required fields
5245 * before using them.
5246 *
5247 * @remarks No-long-jump zone!!!
5248 */
5249static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5250{
5251 uint32_t uIntrState = 0;
5252 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
5253 AssertRC(rc);
5254
5255 if (!uIntrState)
5256 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5257 else
5258 {
5259 Assert( uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
5260 || uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
5261 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
5262 AssertRC(rc);
5263 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). */
5264 AssertRC(rc);
5265
5266 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
5267 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
5268 }
5269}
5270
5271
5272/**
5273 * Saves the guest's activity state.
5274 *
5275 * @returns VBox status code.
5276 * @param pVCpu Pointer to the VMCPU.
5277 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5278 * out-of-sync. Make sure to update the required fields
5279 * before using them.
5280 *
5281 * @remarks No-long-jump zone!!!
5282 */
5283static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5284{
5285    /* Nothing to do for now until we make use of different guest-CPU activity states. Just update the flag. */
5286 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_ACTIVITY_STATE;
5287 return VINF_SUCCESS;
5288}
5289
5290
5291/**
5292 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
5293 * the current VMCS into the guest-CPU context.
5294 *
5295 * @returns VBox status code.
5296 * @param pVCpu Pointer to the VMCPU.
5297 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5298 * out-of-sync. Make sure to update the required fields
5299 * before using them.
5300 *
5301 * @remarks No-long-jump zone!!!
5302 */
5303static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5304{
5305 int rc = VINF_SUCCESS;
5306 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
5307 {
5308 uint32_t u32Val = 0;
5309 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc);
5310 pMixedCtx->SysEnter.cs = u32Val;
5311 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR;
5312 }
5313
5314 uint64_t u64Val = 0;
5315 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
5316 {
5317 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &u64Val); AssertRCReturn(rc, rc);
5318 pMixedCtx->SysEnter.eip = u64Val;
5319 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR;
5320 }
5321 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
5322 {
5323 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &u64Val); AssertRCReturn(rc, rc);
5324 pMixedCtx->SysEnter.esp = u64Val;
5325 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR;
5326 }
5327 return rc;
5328}
5329
5330
5331/**
5332 * Saves the guest FS_BASE MSR from the current VMCS into the guest-CPU
5333 * context.
5334 *
5335 * @returns VBox status code.
5336 * @param pVCpu Pointer to the VMCPU.
5337 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5338 * out-of-sync. Make sure to update the required fields
5339 * before using them.
5340 *
5341 * @remarks No-long-jump zone!!!
5342 */
5343static int hmR0VmxSaveGuestFSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5344{
5345 int rc = VINF_SUCCESS;
5346 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_FS_BASE_MSR))
5347 {
5348 uint64_t u64Val = 0;
5349 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_FS_BASE, &u64Val); AssertRCReturn(rc, rc);
5350 pMixedCtx->fs.u64Base = u64Val;
5351 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_FS_BASE_MSR;
5352 }
5353 return rc;
5354}
5355
5356
5357/**
5358 * Saves the guest GS_BASE MSR from the current VMCS into the guest-CPU
5359 * context.
5360 *
5361 * @returns VBox status code.
5362 * @param pVCpu Pointer to the VMCPU.
5363 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5364 * out-of-sync. Make sure to update the required fields
5365 * before using them.
5366 *
5367 * @remarks No-long-jump zone!!!
5368 */
5369static int hmR0VmxSaveGuestGSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5370{
5371 int rc = VINF_SUCCESS;
5372 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GS_BASE_MSR))
5373 {
5374 uint64_t u64Val = 0;
5375 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GS_BASE, &u64Val); AssertRCReturn(rc, rc);
5376 pMixedCtx->gs.u64Base = u64Val;
5377 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GS_BASE_MSR;
5378 }
5379 return rc;
5380}
5381
5382
5383/**
5384 * Saves the auto load/store'd guest MSRs from the current VMCS into the
5385 * guest-CPU context. Currently these are LSTAR, STAR, SFMASK, KERNEL-GS BASE
5386 * and TSC_AUX.
5387 *
5388 * @returns VBox status code.
5389 * @param pVCpu Pointer to the VMCPU.
5390 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5391 * out-of-sync. Make sure to update the required fields
5392 * before using them.
5393 *
5394 * @remarks No-long-jump zone!!!
5395 */
5396static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5397{
5398 if (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)
5399 return VINF_SUCCESS;
5400
5401#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
5402 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
5403 {
5404 PVMXAUTOMSR pMsr = (PVMXAUTOMSR)pVCpu->hm.s.vmx.pvGuestMsr;
5405 pMsr += i;
5406 switch (pMsr->u32Msr)
5407 {
5408 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;
5409 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;
5410 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;
5411 case MSR_K8_TSC_AUX: CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value); break;
5412 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
5413 case MSR_K6_EFER: /* EFER can't be changed without causing a VM-exit. */ break;
5414 default:
5415 {
5416 AssertFailed();
5417 return VERR_HM_UNEXPECTED_LD_ST_MSR;
5418 }
5419 }
5420 }
5421#endif
5422
5423 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS;
5424 return VINF_SUCCESS;
5425}
5426
5427
5428/**
5429 * Saves the guest control registers from the current VMCS into the guest-CPU
5430 * context.
5431 *
5432 * @returns VBox status code.
5433 * @param pVCpu Pointer to the VMCPU.
5434 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5435 * out-of-sync. Make sure to update the required fields
5436 * before using them.
5437 *
5438 * @remarks No-long-jump zone!!!
5439 */
5440static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5441{
5442 /* Guest CR0. Guest FPU. */
5443 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5444 AssertRCReturn(rc, rc);
5445
5446 /* Guest CR4. */
5447 rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
5448 AssertRCReturn(rc, rc);
5449
5450    /* Guest CR2 - always updated during the world-switch or in #PF. */
5451 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
5452 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR3))
5453 {
5454 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
5455 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4);
5456
5457 PVM pVM = pVCpu->CTX_SUFF(pVM);
5458 if ( pVM->hm.s.vmx.fUnrestrictedGuest
5459 || ( pVM->hm.s.fNestedPaging
5460 && CPUMIsGuestPagingEnabledEx(pMixedCtx)))
5461 {
5462 uint64_t u64Val = 0;
5463            rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);  AssertRCReturn(rc, rc);
5464 if (pMixedCtx->cr3 != u64Val)
5465 {
5466 CPUMSetGuestCR3(pVCpu, u64Val);
5467 if (VMMRZCallRing3IsEnabled(pVCpu))
5468 {
5469 PGMUpdateCR3(pVCpu, u64Val);
5470 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5471 }
5472 else
5473 {
5474                    /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3(). */
5475 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
5476 }
5477 }
5478
5479 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
5480 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */
5481 {
5482 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
5483 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
5484 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
5485 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
5486
5487 if (VMMRZCallRing3IsEnabled(pVCpu))
5488 {
5489 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5490 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5491 }
5492 else
5493 {
5494 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
5495 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
5496 }
5497 }
5498 }
5499
5500 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR3;
5501 }
5502
5503 /*
5504 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
5505 * -> VMMRZCallRing3Disable() -> hmR0VmxSaveGuestState() -> Set VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
5506 * -> continue with VM-exit handling -> hmR0VmxSaveGuestControlRegs() and here we are.
5507 *
5508 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
5509 */
5510 if (VMMRZCallRing3IsEnabled(pVCpu))
5511 {
5512 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
5513 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
5514
5515 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
5516 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5517
5518 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5519 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5520 }
5521
5522 return rc;
5523}
5524
5525
5526/**
5527 * Reads a guest segment register from the current VMCS into the guest-CPU
5528 * context.
5529 *
5530 * @returns VBox status code.
5531 * @param pVCpu Pointer to the VMCPU.
5532 * @param idxSel Index of the selector in the VMCS.
5533 * @param idxLimit Index of the segment limit in the VMCS.
5534 * @param idxBase Index of the segment base in the VMCS.
5535 * @param idxAccess Index of the access rights of the segment in the VMCS.
5536 * @param pSelReg Pointer to the segment selector.
5537 *
5538 * @remarks No-long-jump zone!!!
5539 * @remarks Never call this function directly!!! Use the VMXLOCAL_READ_SEG()
5540 * macro as that takes care of whether to read from the VMCS cache or
5541 * not.
5542 */
5543DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
5544 PCPUMSELREG pSelReg)
5545{
5546 uint32_t u32Val = 0;
5547 int rc = VMXReadVmcs32(idxSel, &u32Val);
5548 AssertRCReturn(rc, rc);
5549 pSelReg->Sel = (uint16_t)u32Val;
5550 pSelReg->ValidSel = (uint16_t)u32Val;
5551 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
5552
5553 rc = VMXReadVmcs32(idxLimit, &u32Val);
5554 AssertRCReturn(rc, rc);
5555 pSelReg->u32Limit = u32Val;
5556
5557 uint64_t u64Val = 0;
5558 rc = VMXReadVmcsGstNByIdxVal(idxBase, &u64Val);
5559 AssertRCReturn(rc, rc);
5560 pSelReg->u64Base = u64Val;
5561
5562 rc = VMXReadVmcs32(idxAccess, &u32Val);
5563 AssertRCReturn(rc, rc);
5564 pSelReg->Attr.u = u32Val;
5565
5566 /*
5567 * If VT-x marks the segment as unusable, most other bits remain undefined:
5568 * - For CS the L, D and G bits have meaning.
5569 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
5570 * - For the remaining data segments no bits are defined.
5571 *
5572     * The present bit and the unusable bit have been observed to be set at the
5573     * same time (the selector was supposed to be invalid as we started executing
5574 * a V8086 interrupt in ring-0).
5575 *
5576     * What is important for the rest of the VBox code is that the P bit is
5577     * cleared. Some of the other VBox code recognizes the unusable bit, but
5578     * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
5579 * safe side here, we'll strip off P and other bits we don't care about. If
5580 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
5581 *
5582 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
5583 */
5584 if (pSelReg->Attr.u & X86DESCATTR_UNUSABLE)
5585 {
5586 Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR); /* TR is the only selector that can never be unusable. */
5587
5588 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
5589 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
5590 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
5591
5592 Log4(("hmR0VmxReadSegmentReg: Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Val, pSelReg->Attr.u));
5593#ifdef DEBUG_bird
5594 AssertMsg((u32Val & ~X86DESCATTR_P) == pSelReg->Attr.u,
5595 ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
5596 idxSel, u32Val, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
5597#endif
5598 }
5599 return VINF_SUCCESS;
5600}
5601
5602
5603#ifdef VMX_USE_CACHED_VMCS_ACCESSES
5604# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5605 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5606 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5607#else
5608# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5609 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5610 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5611#endif
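
/*
 * Illustrative expansion of the non-cached variant above: VMXLOCAL_READ_SEG(CS, cs)
 * reads the selector, limit, base and access rights of CS in a single call.
 */
#if 0
    rc = hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT,
                               VMX_VMCS_GUEST_CS_BASE, VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
#endif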
5612
5613
5614/**
5615 * Saves the guest segment registers from the current VMCS into the guest-CPU
5616 * context.
5617 *
5618 * @returns VBox status code.
5619 * @param pVCpu Pointer to the VMCPU.
5620 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5621 * out-of-sync. Make sure to update the required fields
5622 * before using them.
5623 *
5624 * @remarks No-long-jump zone!!!
5625 */
5626static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5627{
5628 /* Guest segment registers. */
5629 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SEGMENT_REGS))
5630 {
5631 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); AssertRCReturn(rc, rc);
5632 rc = VMXLOCAL_READ_SEG(CS, cs); AssertRCReturn(rc, rc);
5633 rc = VMXLOCAL_READ_SEG(SS, ss); AssertRCReturn(rc, rc);
5634 rc = VMXLOCAL_READ_SEG(DS, ds); AssertRCReturn(rc, rc);
5635 rc = VMXLOCAL_READ_SEG(ES, es); AssertRCReturn(rc, rc);
5636 rc = VMXLOCAL_READ_SEG(FS, fs); AssertRCReturn(rc, rc);
5637 rc = VMXLOCAL_READ_SEG(GS, gs); AssertRCReturn(rc, rc);
5638
5639 /* Restore segment attributes for real-on-v86 mode hack. */
5640 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5641 {
5642 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
5643 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
5644 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
5645 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
5646 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
5647 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
5648 }
5649 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SEGMENT_REGS;
5650 }
5651
5652 return VINF_SUCCESS;
5653}
5654
5655
5656/**
5657 * Saves the guest descriptor table registers and task register from the current
5658 * VMCS into the guest-CPU context.
5659 *
5660 * @returns VBox status code.
5661 * @param pVCpu Pointer to the VMCPU.
5662 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5663 * out-of-sync. Make sure to update the required fields
5664 * before using them.
5665 *
5666 * @remarks No-long-jump zone!!!
5667 */
5668static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5669{
5670 int rc = VINF_SUCCESS;
5671
5672 /* Guest LDTR. */
5673 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_LDTR))
5674 {
5675 rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
5676 AssertRCReturn(rc, rc);
5677 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LDTR;
5678 }
5679
5680 /* Guest GDTR. */
5681 uint64_t u64Val = 0;
5682 uint32_t u32Val = 0;
5683 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GDTR))
5684 {
5685 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
5686 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5687 pMixedCtx->gdtr.pGdt = u64Val;
5688 pMixedCtx->gdtr.cbGdt = u32Val;
5689 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GDTR;
5690 }
5691
5692 /* Guest IDTR. */
5693 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_IDTR))
5694 {
5695 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
5696 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5697 pMixedCtx->idtr.pIdt = u64Val;
5698 pMixedCtx->idtr.cbIdt = u32Val;
5699 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_IDTR;
5700 }
5701
5702 /* Guest TR. */
5703 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_TR))
5704 {
5705 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5706 AssertRCReturn(rc, rc);
5707
5708        /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR; don't save the fake one. */
5709 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5710 {
5711 rc = VMXLOCAL_READ_SEG(TR, tr);
5712 AssertRCReturn(rc, rc);
5713 }
5714 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_TR;
5715 }
5716 return rc;
5717}
5718
5719#undef VMXLOCAL_READ_SEG
5720
5721
5722/**
5723 * Saves the guest debug-register DR7 from the current VMCS into the guest-CPU
5724 * context.
5725 *
5726 * @returns VBox status code.
5727 * @param pVCpu Pointer to the VMCPU.
5728 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5729 * out-of-sync. Make sure to update the required fields
5730 * before using them.
5731 *
5732 * @remarks No-long-jump zone!!!
5733 */
5734static int hmR0VmxSaveGuestDR7(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5735{
5736 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_DEBUG))
5737 {
5738 if (!CPUMIsHyperDebugStateActive(pVCpu))
5739 {
5740 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
5741 uint32_t u32Val;
5742 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); AssertRCReturn(rc, rc);
5743 pMixedCtx->dr[7] = u32Val;
5744 }
5745
5746 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_DEBUG;
5747 }
5748 return VINF_SUCCESS;
5749}
5750
5751
5752/**
5753 * Saves the guest APIC state from the current VMCS into the guest-CPU context.
5754 *
5755 * @returns VBox status code.
5756 * @param pVCpu Pointer to the VMCPU.
5757 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5758 * out-of-sync. Make sure to update the required fields
5759 * before using them.
5760 *
5761 * @remarks No-long-jump zone!!!
5762 */
5763static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5764{
5765 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
5766 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_APIC_STATE;
5767 return VINF_SUCCESS;
5768}
5769
5770
5771/**
5772 * Saves the entire guest state from the currently active VMCS into the
5773 * guest-CPU context. This essentially VMREADs all guest data.
5774 *
5775 * @returns VBox status code.
5776 * @param pVCpu Pointer to the VMCPU.
5777 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5778 * out-of-sync. Make sure to update the required fields
5779 * before using them.
5780 */
5781static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5782{
5783 Assert(pVCpu);
5784 Assert(pMixedCtx);
5785
5786 if (pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL)
5787 return VINF_SUCCESS;
5788
5789 /* Though we can longjmp to ring-3 due to log-flushes here and get recalled again on the ring-3 callback path,
5790 there is no real need to do so. */
5791 if (VMMRZCallRing3IsEnabled(pVCpu))
5792 VMMR0LogFlushDisable(pVCpu);
5793 else
5794 Assert(VMMR0IsLogFlushDisabled(pVCpu));
5795 Log4Func(("vcpu[%RU32]\n", pVCpu->idCpu));
5796
5797 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
5798 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5799
5800 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
5801 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5802
5803 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
5804 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5805
5806 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
5807 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5808
5809 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
5810 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDR7 failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5811
5812 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
5813 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5814
5815 rc = hmR0VmxSaveGuestFSBaseMsr(pVCpu, pMixedCtx);
5816 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestFSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5817
5818 rc = hmR0VmxSaveGuestGSBaseMsr(pVCpu, pMixedCtx);
5819 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5820
5821 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
5822 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5823
5824 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
5825 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5826
5827 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
5828 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5829
5830 AssertMsg(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL,
5831 ("Missed guest state bits while saving state; residue %RX32\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
5832
5833 if (VMMRZCallRing3IsEnabled(pVCpu))
5834 VMMR0LogFlushEnable(pVCpu);
5835
5836 return rc;
5837}
5838
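/*
 * A note on the pattern above (illustrative sketch, not new behaviour): every
 * hmR0VmxSaveGuestXxx helper is guarded by its HMVMX_UPDATED_GUEST_* bit, so
 * re-invoking this function after a partial save only VMREADs what is still
 * missing, e.g.:
 *
 *     hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx); // sets the RIP/RSP/RFLAGS bits
 *     hmR0VmxSaveGuestState(pVCpu, pMixedCtx);        // skips those, reads the rest
 */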
5839
5840/**
5841 * Check per-VM and per-VCPU force flag actions that require us to go back to
5842 * ring-3 for one reason or another.
5843 *
5844 * @returns VBox status code (informational status codes included).
5845 * @retval VINF_SUCCESS if we don't have any actions that require going back to
5846 * ring-3.
5847 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
5848 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
5849 * interrupts)
5850 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
5851 * all EMTs to be in ring-3.
5852 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
5853 * @retval VINF_EM_NO_MEMORY if PGM is out of memory; we need to return to
5854 * the EM loop.
5855 *
5856 * @param pVM Pointer to the VM.
5857 * @param pVCpu Pointer to the VMCPU.
5858 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5859 * out-of-sync. Make sure to update the required fields
5860 * before using them.
5861 */
5862static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5863{
5864 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5865
5866 if ( VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
5867 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
5868 || VMCPU_FF_IS_PENDING(pVCpu, !pVCpu->hm.s.fSingleInstruction
5869 ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
5870 {
5871 /* We need the control registers now; make sure the guest-CPU context is updated. */
5872 int rc3 = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
5873 AssertRCReturn(rc3, rc3);
5874
5875 /* Pending HM CR3 sync. */
5876 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
5877 {
5878 int rc2 = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
5879 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
5880 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
5881 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5882 }
5883
5884 /* Pending HM PAE PDPEs. */
5885 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
5886 {
5887 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5888 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5889 }
5890
5891 /* Pending PGM CR3 sync. */
5892 if (VMCPU_FF_IS_PENDING(pVCpu,VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
5893 {
5894 int rc2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
5895 if (rc2 != VINF_SUCCESS)
5896 {
5897 AssertRC(rc2);
5898 Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", rc2));
5899 return rc2;
5900 }
5901 }
5902
5903 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
5904 /* -XXX- what was that about single stepping? */
5905 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
5906 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
5907 {
5908 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
5909 int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
5910 Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
5911 return rc2;
5912 }
5913
5914 /* Pending VM request packets, such as hardware interrupts. */
5915 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
5916 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
5917 {
5918 Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
5919 return VINF_EM_PENDING_REQUEST;
5920 }
5921
5922 /* Pending PGM pool flushes. */
5923 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
5924 {
5925 Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
5926 return VINF_PGM_POOL_FLUSH_PENDING;
5927 }
5928
5929 /* Pending DMA requests. */
5930 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
5931 {
5932 Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
5933 return VINF_EM_RAW_TO_R3;
5934 }
5935 }
5936
5937 /* Paranoia. */
5938 return VINF_SUCCESS;
5939}
5940
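/*
 * Caller sketch (this mirrors the use in hmR0VmxPreRunGuest() further below):
 * any status other than VINF_SUCCESS means "go back to ring-3".
 *
 *     int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
 *     if (rc != VINF_SUCCESS)
 *         return rc;   // e.g. VINF_EM_PENDING_REQUEST, VINF_PGM_POOL_FLUSH_PENDING.
 */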
5941
5942/**
5943 * Converts any TRPM trap into a pending HM event. This is typically used when
5944 * entering from ring-3 (not longjmp returns).
5945 *
5946 * @param pVCpu Pointer to the VMCPU.
5947 */
5948static void hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
5949{
5950 Assert(TRPMHasTrap(pVCpu));
5951 Assert(!pVCpu->hm.s.Event.fPending);
5952
5953 uint8_t uVector;
5954 TRPMEVENT enmTrpmEvent;
5955 RTGCUINT uErrCode;
5956 RTGCUINTPTR GCPtrFaultAddress;
5957 uint8_t cbInstr;
5958
5959 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
5960 AssertRC(rc);
5961
5962 /* Refer Intel spec. 24.8.3 "VM-entry Controls for Event Injection" for the format of u32IntrInfo. */
5963 uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
5964 if (enmTrpmEvent == TRPM_TRAP)
5965 {
5966 switch (uVector)
5967 {
5968 case X86_XCPT_BP:
5969 case X86_XCPT_OF:
5970 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5971 break;
5972
5973 case X86_XCPT_PF:
5974 case X86_XCPT_DF:
5975 case X86_XCPT_TS:
5976 case X86_XCPT_NP:
5977 case X86_XCPT_SS:
5978 case X86_XCPT_GP:
5979 case X86_XCPT_AC:
5980 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5981 /* no break! */
5982 default:
5983 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5984 break;
5985 }
5986 }
5987 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
5988 {
5989 if (uVector == X86_XCPT_NMI)
5990 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5991 else
5992 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5993 }
5994 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
5995 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5996 else
5997 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
5998
5999 rc = TRPMResetTrap(pVCpu);
6000 AssertRC(rc);
6001 Log4(("TRPM->HM event: u32IntrInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
6002 u32IntrInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
6003
6004 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, uErrCode, GCPtrFaultAddress);
6005 STAM_COUNTER_DEC(&pVCpu->hm.s.StatInjectPendingReflect);
6006}
6007
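/*
 * For reference, a sketch of the 32-bit interruption-information format
 * assembled above (Intel spec. 24.8.3 "VM-Entry Controls for Event Injection");
 * the literal shift/bit values are illustrative, the code uses the VMX_* macros:
 *
 *     bits  7:0   vector of the interrupt or exception
 *     bits 10:8   type (0=ext int, 2=NMI, 3=hw xcpt, 4=sw int, 6=sw xcpt)
 *     bit  11     deliver error code on injection
 *     bit  31     valid
 *
 * E.g. a hardware #GP with an error code is roughly:
 *     u32IntrInfo = X86_XCPT_GP | (3 << 8) | RT_BIT(11) | RT_BIT(31);
 */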
6008
6009/**
6010 * Converts any pending HM event into a TRPM trap. Typically used when leaving
6011 * VT-x to execute any instruction.
6012 *
6013 * @param pVCpu Pointer to the VMCPU.
6014 */
6015static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
6016{
6017 Assert(pVCpu->hm.s.Event.fPending);
6018
6019 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
6020 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntrInfo);
6021 bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo);
6022 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
6023
6024 /* If a trap was already pending, we did something wrong! */
6025 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
6026
6027 TRPMEVENT enmTrapType;
6028 switch (uVectorType)
6029 {
6030 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
6031 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
6032 enmTrapType = TRPM_HARDWARE_INT;
6033 break;
6034
6035 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
6036 enmTrapType = TRPM_SOFTWARE_INT;
6037 break;
6038
6039 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
6040 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
6041 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
6042 enmTrapType = TRPM_TRAP;
6043 break;
6044
6045 default:
6046 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
6047 enmTrapType = TRPM_32BIT_HACK;
6048 break;
6049 }
6050
6051 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
6052
6053 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
6054 AssertRC(rc);
6055
6056 if (fErrorCodeValid)
6057 TRPMSetErrorCode(pVCpu, uErrorCode);
6058
6059 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6060 && uVector == X86_XCPT_PF)
6061 {
6062 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
6063 }
6064 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6065 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6066 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6067 {
6068 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6069 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
6070 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
6071 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
6072 }
6073 pVCpu->hm.s.Event.fPending = false;
6074}
6075
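/*
 * Quick reference for the conversion done above (sketch):
 *
 *     VMX_IDT_VECTORING_INFO_TYPE_EXT_INT, _NMI         -> TRPM_HARDWARE_INT
 *     VMX_IDT_VECTORING_INFO_TYPE_SW_INT                -> TRPM_SOFTWARE_INT
 *     _PRIV_SW_XCPT, _SW_XCPT (#BP/#OF), _HW_XCPT       -> TRPM_TRAP
 *
 * with the error code, fault address (#PF only) and instruction length
 * forwarded to TRPM when applicable.
 */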
6076
6077/**
6078 * Does the necessary state syncing before returning to ring-3 for any reason
6079 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
6080 *
6081 * @param pVM Pointer to the VM.
6082 * @param pVCpu Pointer to the VMCPU.
6083 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6084 * out-of-sync. Make sure to update the required fields
6085 * before using them.
6086 *
6087 * @remarks No-long-jmp zone!!!
6088 */
6089static void hmR0VmxLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6090{
6091 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6092 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6093
6094 RTCPUID idCpu = RTMpCpuId();
6095 Log4Func(("HostCpuId=%u\n", idCpu));
6096
6097 /* Save the guest state if necessary. */
6098 if (pVCpu->hm.s.vmx.fUpdatedGuestState != HMVMX_UPDATED_GUEST_ALL)
6099 {
6100 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6101 AssertRC(rc);
6102 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL);
6103 }
6104
6105 /* Restore host FPU state if necessary and resync on next R0 reentry. */
6106 if (CPUMIsGuestFPUStateActive(pVCpu))
6107 {
6108 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
6109 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
6110 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
6111 }
6112
6113 /* Restore host debug registers if necessary and resync on next R0 reentry. */
6114#ifdef VBOX_STRICT
6115 if (CPUMIsHyperDebugStateActive(pVCpu))
6116 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
6117#endif
6118 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
6119 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
6120 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
6121 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
6122
6123 /* Restore host-state bits that VT-x only restores partially. */
6124 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
6125 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
6126 {
6127 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
6128 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
6129 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
6130 }
6131
6132 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
6133 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
6134 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
6135 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
6136 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
6137 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
6138 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
6139 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
6140
6141 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
6142
6143 /** @todo This kinda defeats the purpose of having preemption hooks.
6144 * The problem is, deregistering the hooks should be moved to a place that
6145 * lasts until the EMT is about to be destroyed, not done every time we leave
6146 * HM context.
6147 */
6148 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
6149 {
6150 int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
6151 AssertRC(rc);
6152 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
6153 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
6154 }
6155 Assert(!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
6156 NOREF(idCpu);
6157}
6158
6159
6160/**
6161 * Leaves the VT-x session.
6162 *
6163 * @param pVM Pointer to the VM.
6164 * @param pVCpu Pointer to the VMCPU.
6165 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6166 * out-of-sync. Make sure to update the required fields
6167 * before using them.
6168 *
6169 * @remarks No-long-jmp zone!!!
6170 */
6171DECLINLINE(void) hmR0VmxLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6172{
6173 HM_DISABLE_PREEMPT_IF_NEEDED();
6174 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6175 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6176
6177 /* When thread-context hooks are used, we can avoid doing the leave again if we were preempted earlier
6178 and already did this from the VMXR0ThreadCtxCallback(). */
6179 if (!pVCpu->hm.s.fLeaveDone)
6180 {
6181 hmR0VmxLeave(pVM, pVCpu, pMixedCtx);
6182 pVCpu->hm.s.fLeaveDone = true;
6183 }
6184
6185 /* Deregister hook now that we've left HM context before re-enabling preemption. */
6186 /** @todo This is bad. Deregistering here means we need to VMCLEAR always
6187 * (longjmp/exit-to-r3) in VT-x which is not efficient. */
6188 if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
6189 VMMR0ThreadCtxHooksDeregister(pVCpu);
6190
6191 /* Leave HM context. This takes care of local init (term). */
6192 int rc = HMR0LeaveCpu(pVCpu);
6193 AssertRC(rc); NOREF(rc);
6194
6195 HM_RESTORE_PREEMPT_IF_NEEDED();
6196}
6197
6198
6199/**
6200 * Does the necessary state syncing before doing a longjmp to ring-3.
6201 *
6202 * @param pVM Pointer to the VM.
6203 * @param pVCpu Pointer to the VMCPU.
6204 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6205 * out-of-sync. Make sure to update the required fields
6206 * before using them.
6207 *
6208 * @remarks No-long-jmp zone!!!
6209 */
6210DECLINLINE(void) hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6211{
6212 hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
6213}
6214
6215
6216/**
6217 * Take necessary actions before going back to ring-3.
6218 *
6219 * An action requires us to go back to ring-3. This function does the necessary
6220 * steps before we can safely return to ring-3. This is not the same as a longjmp
6221 * to ring-3; this exit is voluntary and prepares the guest so it may continue
6222 * executing outside HM (recompiler/IEM).
6223 *
6224 * @param pVM Pointer to the VM.
6225 * @param pVCpu Pointer to the VMCPU.
6226 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6227 * out-of-sync. Make sure to update the required fields
6228 * before using them.
6229 * @param rcExit The reason for exiting to ring-3. Can be
6230 * VINF_VMM_UNKNOWN_RING3_CALL.
6231 */
6232static void hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
6233{
6234 Assert(pVM);
6235 Assert(pVCpu);
6236 Assert(pMixedCtx);
6237 HMVMX_ASSERT_PREEMPT_SAFE();
6238
6239 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_GUEST_STATE))
6240 {
6241 /* We've done what is required in hmR0VmxExitErrInvalidGuestState(). We're not going to continue guest execution... */
6242 return;
6243 }
6244 else if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
6245 {
6246 VMXGetActivatedVmcs(&pVCpu->hm.s.vmx.LastError.u64VMCSPhys);
6247 pVCpu->hm.s.vmx.LastError.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
6248 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
6249 /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */
6250 return;
6251 }
6252
6253 /* Please, no longjmps here (a log flush could trigger a longjmp back to ring-3). NO LOGGING BEFORE THIS POINT! */
6254 VMMRZCallRing3Disable(pVCpu);
6255 Log4(("hmR0VmxExitToRing3: pVCpu=%p idCpu=%RU32 rcExit=%d\n", pVCpu, pVCpu->idCpu, rcExit));
6256
6257 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring-3. */
6258 if (pVCpu->hm.s.Event.fPending)
6259 {
6260 hmR0VmxPendingEventToTrpmTrap(pVCpu);
6261 Assert(!pVCpu->hm.s.Event.fPending);
6262 }
6263
6264 /* Save guest state and restore host state bits. */
6265 hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
6266 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
6267
6268 /* Sync recompiler state. */
6269 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
6270 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
6271 | CPUM_CHANGED_LDTR
6272 | CPUM_CHANGED_GDTR
6273 | CPUM_CHANGED_IDTR
6274 | CPUM_CHANGED_TR
6275 | CPUM_CHANGED_HIDDEN_SEL_REGS);
6276 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
6277 if ( pVM->hm.s.fNestedPaging
6278 && CPUMIsGuestPagingEnabledEx(pMixedCtx))
6279 {
6280 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
6281 }
6282
6283 /*
6284 * Clear X86_EFL_TF if necessary.
6285 */
6286 if (pVCpu->hm.s.fClearTrapFlag)
6287 {
6288 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);
6289 pMixedCtx->eflags.Bits.u1TF = 0;
6290 pVCpu->hm.s.fClearTrapFlag = false;
6291 }
6292 /** @todo there seems to be issues with the resume flag when the monitor trap
6293 * flag is pending without being used. Seen early in bios init when
6294 * accessing APIC page in prot mode. */
6295
6296 /* On our way back from ring-3 the following needs to be done. */
6297 if (rcExit == VINF_EM_RAW_INTERRUPT)
6298 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
6299 else
6300 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
6301
6302 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
6303 VMMRZCallRing3Enable(pVCpu);
6304}
6305
6306
6307/**
6308 * VMMRZCallRing3() callback wrapper which saves the guest state before we
6309 * longjump to ring-3 and possibly get preempted.
6310 *
6311 * @param pVCpu Pointer to the VMCPU.
6312 * @param enmOperation The operation causing the ring-3 longjump.
6313 * @param pvUser The user argument (pointer to the possibly
6314 * out-of-date guest-CPU context).
6315 *
6316 * @remarks Must never be called with @a enmOperation ==
6317 * VMMCALLRING3_VM_R0_ASSERTION.
6318 */
6319DECLCALLBACK(void) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
6320{
6321 /* VMMRZCallRing3() already makes sure we never get called as a result of a longjmp due to an assertion. */
6322 Assert(pVCpu);
6323 Assert(pvUser);
6324 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6325 HMVMX_ASSERT_PREEMPT_SAFE();
6326
6327 VMMRZCallRing3Disable(pVCpu);
6328 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6329
6330 Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
6331 hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
6332
6333 VMMRZCallRing3Enable(pVCpu);
6334}
6335
6336
6337/**
6338 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
6339 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
6340 *
6341 * @param pVCpu Pointer to the VMCPU.
6342 */
6343DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
6344{
6345 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.msr.VmxProcCtls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
6346 {
6347 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
6348 {
6349 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
6350 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
6351 AssertRC(rc);
6352 }
6353 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
6354}
6355
6356
6357/**
6358 * Evaluates the event to be delivered to the guest and sets it as the pending
6359 * event.
6360 *
6361 * @param pVCpu Pointer to the VMCPU.
6362 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6363 * out-of-sync. Make sure to update the required fields
6364 * before using them.
6365 */
6366static void hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6367{
6368 Assert(!pVCpu->hm.s.Event.fPending);
6369
6370 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
6371 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
6372 bool fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
6373 bool fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
6374
6375 Assert(!fBlockSti || (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS));
6376 Assert( !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) /* We don't support block-by-NMI and SMI yet.*/
6377 && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
6378 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
6379 Assert(!TRPMHasTrap(pVCpu));
6380
6381 /** @todo SMI. SMIs take priority over NMIs. */
6382 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
6383 {
6384 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
6385 if ( !fBlockMovSS
6386 && !fBlockSti)
6387 {
6389 Log4(("Pending NMI vcpu[%RU32]\n", pVCpu->idCpu));
6390 uint32_t u32IntrInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
6391 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6392
6393 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6394 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
6395 }
6396 else
6397 hmR0VmxSetIntWindowExitVmcs(pVCpu);
6398 }
6399 else if ( VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
6400 && !pVCpu->hm.s.fSingleInstruction)
6401 {
6402 /*
6403 * Check if the guest can receive external interrupts (PIC/APIC). Once we do PDMGetInterrupt() we -must- deliver
6404 * the interrupt ASAP. We must not execute any guest code until we inject the interrupt, which is why the
6405 * interruptibility is evaluated right here rather than setting the event pending based solely on the force-flags.
6406 */
6407 int rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6408 AssertRC(rc);
6409 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
6410 if ( !fBlockInt
6411 && !fBlockSti
6412 && !fBlockMovSS)
6413 {
6414 uint8_t u8Interrupt;
6415 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
6416 if (RT_SUCCESS(rc))
6417 {
6418 Log4(("Pending interrupt vcpu[%RU32] u8Interrupt=%#x \n", pVCpu->idCpu, u8Interrupt));
6419 uint32_t u32IntrInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID;
6420 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6421
6422 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6423 }
6424 else
6425 {
6426 /** @todo Does this actually happen? If not turn it into an assertion. */
6427 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
6428 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
6429 }
6430 }
6431 else
6432 hmR0VmxSetIntWindowExitVmcs(pVCpu);
6433 }
6434}
6435
6436
6437/**
6438 * Injects any pending events into the guest if the guest is in a state to
6439 * receive them.
6440 *
6441 * @returns VBox status code (informational status codes included).
6442 * @param pVCpu Pointer to the VMCPU.
6443 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6444 * out-of-sync. Make sure to update the required fields
6445 * before using them.
6446 *
6447 * @remarks No-long-jump zone!!!
6448 */
6449static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6450{
6451 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
6452 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
6453 bool fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
6454 bool fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
6455
6456 Assert(!fBlockSti || (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS));
6457 Assert( !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) /* We don't support block-by-NMI and SMI yet.*/
6458 && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
6459 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
6460 Assert(!TRPMHasTrap(pVCpu));
6461
6462 int rc = VINF_SUCCESS;
6463 if (pVCpu->hm.s.Event.fPending)
6464 {
6465#if defined(VBOX_STRICT) || defined(VBOX_WITH_STATISTICS)
6466 uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
6467 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
6468 {
6469 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6470 AssertRCReturn(rc, rc);
6471 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
6472 Assert(!fBlockInt);
6473 Assert(!fBlockSti);
6474 Assert(!fBlockMovSS);
6475 }
6476 else if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
6477 {
6478 Assert(!fBlockSti);
6479 Assert(!fBlockMovSS);
6480 }
6481#endif
6482 Log4(("Injecting pending event vcpu[%RU32] u64IntrInfo=%#RX64\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntrInfo));
6483 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.cbInstr,
6484 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, &uIntrState);
6485 AssertRCReturn(rc, rc);
6486
6487 pVCpu->hm.s.Event.fPending = false;
6488
6489 /* Update the interruptibility-state as it could have been changed by
6490 hmR0VmxInjectEventVmcs() (e.g. real-on-v86 guest injecting software interrupts) */
6491 fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
6492 fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
6493
6494#ifdef VBOX_WITH_STATISTICS
6495 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
6496 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
6497 else
6498 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
6499#endif
6500 }
6501
6502 /* Deliver a pending debug exception if the guest is single-stepping. Evaluate and set the BS bit. */
6503 int rc2 = VINF_SUCCESS;
6504 if ( fBlockSti
6505 || fBlockMovSS)
6506 {
6507 if (!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu))
6508 {
6509 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);
6510 if (pMixedCtx->eflags.Bits.u1TF) /* We don't have any IA32_DEBUGCTL MSR for guests. Treat as all bits 0. */
6511 {
6512 /*
6513 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD,
6514 * VMX_EXIT_MTF, VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI.
6515 * See Intel spec. 27.3.4 "Saving Non-Register State".
6516 */
6517 rc2 = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
6518 AssertRCReturn(rc2, rc2);
6519 }
6520 }
6521 else
6522 {
6523 /* We are single-stepping in the hypervisor debugger, clear interrupt inhibition as setting the BS bit would mean
6524 delivering a #DB to the guest upon VM-entry when it shouldn't be. */
6525 uIntrState = 0;
6526 }
6527 }
6528
6529 /*
6530 * There's no need to clear the VM-entry interruption-information field here if we're not injecting anything.
6531 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6532 */
6533 rc2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
6534 AssertRC(rc2);
6535
6536 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
6537 return rc;
6538}
6539
6540
6541/**
6542 * Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
6543 *
6544 * @param pVCpu Pointer to the VMCPU.
6545 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6546 * out-of-sync. Make sure to update the required fields
6547 * before using them.
6548 */
6549DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6550{
6551 uint32_t u32IntrInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID;
6552 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6553}
6554
6555
6556/**
6557 * Injects a double-fault (#DF) exception into the VM.
6558 *
6559 * @returns VBox status code (informational status code included).
6560 * @param pVCpu Pointer to the VMCPU.
6561 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6562 * out-of-sync. Make sure to update the required fields
6563 * before using them.
* @param puIntrState Pointer to the current guest interruptibility-state.
6564 */
6565DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t *puIntrState)
6566{
6567 uint32_t u32IntrInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
6568 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6569 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6570 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
6571 puIntrState);
6572}
6573
6574
6575/**
6576 * Sets a debug (#DB) exception as pending-for-injection into the VM.
6577 *
6578 * @param pVCpu Pointer to the VMCPU.
6579 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6580 * out-of-sync. Make sure to update the required fields
6581 * before using them.
6582 */
6583DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6584{
6585 uint32_t u32IntrInfo = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID;
6586 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6587 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6588}
6589
6590
6591/**
6592 * Sets an overflow (#OF) exception as pending-for-injection into the VM.
6593 *
6594 * @param pVCpu Pointer to the VMCPU.
6595 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6596 * out-of-sync. Make sure to update the required fields
6597 * before using them.
6598 * @param cbInstr The instruction length in bytes; the return RIP pushed
6599 * on the guest stack is advanced by this amount.
6600 */
6601DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
6602{
6603 uint32_t u32IntrInfo = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID;
6604 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6605 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6606}
6607
6608
6609/**
6610 * Injects a general-protection (#GP) fault into the VM.
6611 *
6612 * @returns VBox status code (informational status code included).
6613 * @param pVCpu Pointer to the VMCPU.
6614 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6615 * out-of-sync. Make sure to update the required fields
6616 * before using them.
6617 * @param fErrorCodeValid Whether the error code is valid for this #GP.
* @param u32ErrorCode The error code associated with the #GP.
* @param puIntrState Pointer to the current guest interruptibility-state.
6618 */
6619DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
6620 uint32_t *puIntrState)
6621{
6622 uint32_t u32IntrInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
6623 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6624 if (fErrorCodeValid)
6625 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6626 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
6627 puIntrState);
6628}
6629
6630
6631/**
6632 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
6633 *
6634 * @param pVCpu Pointer to the VMCPU.
6635 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6636 * out-of-sync. Make sure to update the required fields
6637 * before using them.
6638 * @param uVector The software interrupt vector number.
6639 * @param cbInstr The instruction length in bytes; the return RIP pushed
6640 * on the guest stack is advanced by this amount.
6641 */
6642DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
6643{
6644 uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
6645 if ( uVector == X86_XCPT_BP
6646 || uVector == X86_XCPT_OF)
6647 {
6648 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6649 }
6650 else
6651 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6652 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6653}
6654
6655
6656/**
6657 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
6658 * stack.
6659 *
6660 * @returns VBox status code (informational status codes included).
6661 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
6662 * @param pVM Pointer to the VM.
6663 * @param pMixedCtx Pointer to the guest-CPU context.
6664 * @param uValue The value to push to the guest stack.
6665 */
6666DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
6667{
6668 /*
6669 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
6670 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
6671 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
6672 */
6673 if (pMixedCtx->sp == 1)
6674 return VINF_EM_RESET;
6675 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around which is expected behaviour. */
6676 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
6677 AssertRCReturn(rc, rc);
6678 return rc;
6679}
6680
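/*
 * Behaviour sketch (values illustrative): with sp=0x0000 the decrement wraps to
 * sp=0xFFFE and the word is stored at ss:0xFFFE, which is the expected real-mode
 * wraparound. Only sp==1 is fatal above, because the 2-byte store at ss:0xFFFF
 * would straddle the segment wrap boundary (hence VINF_EM_RESET).
 */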
6681
6682/**
6683 * Injects an event into the guest upon VM-entry by updating the relevant fields
6684 * in the VM-entry area in the VMCS.
6685 *
6686 * @returns VBox status code (informational error codes included).
6687 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
6688 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
6689 *
6690 * @param pVCpu Pointer to the VMCPU.
6691 * @param pMixedCtx Pointer to the guest-CPU context. The data may
6692 * be out-of-sync. Make sure to update the required
6693 * fields before using them.
6694 * @param u64IntrInfo The VM-entry interruption-information field.
6695 * @param cbInstr The VM-entry instruction length in bytes (for
6696 * software interrupts, exceptions and privileged
6697 * software exceptions).
6698 * @param u32ErrCode The VM-entry exception error code.
6699 * @param GCPtrFaultAddress The page-fault address for #PF exceptions.
6700 * @param puIntrState Pointer to the current guest interruptibility-state.
6701 * This interruptibility-state will be updated if
6702 * necessary. This cannot be NULL.
6703 *
6704 * @remarks No-long-jump zone!!!
6705 * @remarks Requires CR0!
6706 */
6707static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
6708 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState)
6709{
6710 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
6711 AssertMsg(u64IntrInfo >> 32 == 0, ("%#RX64\n", u64IntrInfo));
6712 Assert(puIntrState);
6713 uint32_t u32IntrInfo = (uint32_t)u64IntrInfo;
6714
6715 const uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntrInfo);
6716 const uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo);
6717
6718#ifdef VBOX_STRICT
6719 /* Validate the error-code-valid bit for hardware exceptions. */
6720 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT)
6721 {
6722 switch (uVector)
6723 {
6724 case X86_XCPT_PF:
6725 case X86_XCPT_DF:
6726 case X86_XCPT_TS:
6727 case X86_XCPT_NP:
6728 case X86_XCPT_SS:
6729 case X86_XCPT_GP:
6730 case X86_XCPT_AC:
6731 AssertMsg(VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntrInfo),
6732 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
6733 /* fallthru */
6734 default:
6735 break;
6736 }
6737 }
6738#endif
6739
6740 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
6741 Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6742 || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
6743
6744 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
6745
6746 /* We require CR0 to check if the guest is in real-mode. */
6747 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6748 AssertRCReturn(rc, rc);
6749
6750 /*
6751 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
6752 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
6753 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
6754 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
6755 */
6756 if (CPUMIsGuestInRealModeEx(pMixedCtx))
6757 {
6758 PVM pVM = pVCpu->CTX_SUFF(pVM);
6759 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
6760 {
6761 Assert(PDMVmmDevHeapIsEnabled(pVM));
6762 Assert(pVM->hm.s.vmx.pRealModeTSS);
6763
6764 /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */
6765 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6766 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
6767 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
6768 AssertRCReturn(rc, rc);
6769 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP);
6770
6771 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
6772 const size_t cbIdtEntry = 4;
6773 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
6774 {
6775 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
6776 if (uVector == X86_XCPT_DF)
6777 return VINF_EM_RESET;
6778 else if (uVector == X86_XCPT_GP)
6779 {
6780 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
6781 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, puIntrState);
6782 }
6783
6784 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
6785 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
6786 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, puIntrState);
6787 }
6788
6789 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
6790 uint16_t uGuestIp = pMixedCtx->ip;
6791 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
6792 {
6793 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
6794 /* #BP and #OF are both benign traps; we need to resume at the next instruction. */
6795 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6796 }
6797 else if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
6798 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6799
6800 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
6801 uint16_t offIdtEntry = 0;
6802 RTSEL selIdtEntry = 0;
6803 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
6804 rc = PGMPhysSimpleReadGCPhys(pVM, &offIdtEntry, GCPhysIdtEntry, sizeof(offIdtEntry));
6805 rc |= PGMPhysSimpleReadGCPhys(pVM, &selIdtEntry, GCPhysIdtEntry + 2, sizeof(selIdtEntry));
6806 AssertRCReturn(rc, rc);
6807
6808 /* Construct the stack frame for the interrupt/exception handler. */
6809 rc = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
6810 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
6811 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
6812 AssertRCReturn(rc, rc);
6813
6814 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
6815 if (rc == VINF_SUCCESS)
6816 {
6817 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
6818 pMixedCtx->rip = offIdtEntry;
6819 pMixedCtx->cs.Sel = selIdtEntry;
6820 pMixedCtx->cs.u64Base = selIdtEntry << 4; /* Real-mode segment base = selector * 16. */
6821 if ( uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
6822 && uVector == X86_XCPT_PF)
6823 {
6824 pMixedCtx->cr2 = GCPtrFaultAddress;
6825 }
6826
6827 /* If any other guest-state bits are changed here, make sure to update
6828 hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */
6829 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS
6830 | HM_CHANGED_GUEST_RIP
6831 | HM_CHANGED_GUEST_RFLAGS
6832 | HM_CHANGED_GUEST_RSP;
6833
6834 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
6835 if (*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
6836 {
6837 Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6838 && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
6839 Log4(("Clearing inhibition due to STI.\n"));
6840 *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
6841 }
6842 Log4(("Injecting real-mode: u32IntrInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntrInfo, u32ErrCode, cbInstr));
6843 }
6844 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
6845 return rc;
6846 }
6847 else
6848 {
6849 /*
6850 * For CPUs with unrestricted guest execution running real-mode guests, we must not set the deliver-error-code bit.
6851 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
6852 */
6853 u32IntrInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6854 }
6855 }
6856
6857 /* Validate. */
6858 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntrInfo)); /* Bit 31 (Valid bit) must be set by caller. */
6859 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK(u32IntrInfo)); /* Bit 12 MBZ. */
6860 Assert(!(u32IntrInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
6861
6862 /* Inject. */
6863 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntrInfo);
6864 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntrInfo))
6865 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
6866 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
6867
6868 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
6869 && uVector == X86_XCPT_PF)
6870 {
6871 pMixedCtx->cr2 = GCPtrFaultAddress;
6872 }
6873
6874 Log4(("Injecting vcpu[%RU32] u32IntrInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", pVCpu->idCpu,
6875 u32IntrInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
6876
6877 AssertRCReturn(rc, rc);
6878 return rc;
6879}
6880
6881
6882/**
6883 * Enters the VT-x session.
6884 *
6885 * @returns VBox status code.
6886 * @param pVM Pointer to the VM.
6887 * @param pVCpu Pointer to the VMCPU.
6888 * @param pCpu Pointer to the CPU info struct.
6889 */
6890VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
6891{
6892 AssertPtr(pVM);
6893 AssertPtr(pVCpu);
6894 Assert(pVM->hm.s.vmx.fSupported);
6895 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6896 NOREF(pCpu);
6897
6898 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6899
6900#ifdef VBOX_STRICT
6901 /* Make sure we're in VMX root mode. */
6902 RTCCUINTREG u32HostCR4 = ASMGetCR4();
6903 if (!(u32HostCR4 & X86_CR4_VMXE))
6904 {
6905 LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
6906 return VERR_VMX_X86_CR4_VMXE_CLEARED;
6907 }
6908#endif
6909
6910 /*
6911 * Load the VCPU's VMCS as the current (and active) one.
6912 */
6913 Assert(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR);
6914 int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
6915 if (RT_FAILURE(rc))
6916 return rc;
6917
6918 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
6919 pVCpu->hm.s.fLeaveDone = false;
6920 Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
6921
6922 return VINF_SUCCESS;
6923}
6924
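/*
 * VMCS state transitions used here, as a sketch: VMXActivateVmcs() (VMPTRLD)
 * takes the VMCS from HMVMX_VMCS_STATE_CLEAR to HMVMX_VMCS_STATE_ACTIVE on this
 * CPU; a successful VMLAUNCH later marks it HMVMX_VMCS_STATE_LAUNCHED (done in
 * the run loop, not shown here); VMXClearVmcs() (VMCLEAR) in hmR0VmxLeave()
 * returns it to HMVMX_VMCS_STATE_CLEAR so it may be activated on another CPU.
 */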
6925
6926/**
6927 * The thread-context callback (only on platforms which support it).
6928 *
6929 * @param enmEvent The thread-context event.
6930 * @param pVCpu Pointer to the VMCPU.
6931 * @param fGlobalInit Whether global VT-x/AMD-V init. was used.
6932 * @thread EMT.
6933 */
6934VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
6935{
6936 switch (enmEvent)
6937 {
6938 case RTTHREADCTXEVENT_PREEMPTING:
6939 {
6940 /** @todo Stats. */
6941 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6942 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
6943 VMCPU_ASSERT_EMT(pVCpu);
6944
6945 PVM pVM = pVCpu->CTX_SUFF(pVM);
6946 PCPUMCTX pMixedCtx = CPUMQueryGuestCtxPtr(pVCpu);
6947
6948 /* No longjmps (logger flushes, locks) in this fragile context. */
6949 VMMRZCallRing3Disable(pVCpu);
6950 Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId()));
6951
6952 /* Save the guest-state, restore host-state (FPU, debug etc.). */
6953 if (!pVCpu->hm.s.fLeaveDone)
6954 {
6955 hmR0VmxLeave(pVM, pVCpu, pMixedCtx);
6956 pVCpu->hm.s.fLeaveDone = true;
6957 }
6958
6959 /* Leave HM context, takes care of local init (term). */
6960 int rc = HMR0LeaveCpu(pVCpu);
6961 AssertRC(rc); NOREF(rc);
6962
6963 /* Restore longjmp state. */
6964 VMMRZCallRing3Enable(pVCpu);
6965 break;
6966 }
6967
6968 case RTTHREADCTXEVENT_RESUMED:
6969 {
6970 /** @todo Stats. */
6971 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6972 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
6973 VMCPU_ASSERT_EMT(pVCpu);
6974
6975 /* No longjmps here, as we don't want to trigger preemption (& its hook) while resuming. */
6976 VMMRZCallRing3Disable(pVCpu);
6977 Log4Func(("Resumed: HostCpuId=%u\n", RTMpCpuId()));
6978
6979 /* Initialize the bare minimum state required for HM. This takes care of
6980 initializing VT-x if necessary (onlined CPUs, local init etc.) */
6981 int rc = HMR0EnterCpu(pVCpu);
6982 AssertRC(rc);
6983 Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
6984
6985 /* Load the active VMCS as the current one. */
6986 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
6987 {
6988 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
6989 AssertRC(rc); NOREF(rc);
6990 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
6991 Log4Func(("Resumed: Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
6992 }
6993 pVCpu->hm.s.fLeaveDone = false;
6994 VMMRZCallRing3Enable(pVCpu);
6995 break;
6996 }
6997
6998 default:
6999 break;
7000 }
7001}
7002
7003
7004/**
7005 * Saves the host state in the VMCS host-state.
7006 * Sets up the VM-exit MSR-load area.
7007 *
7008 * The CPU state will be loaded from these fields on every successful VM-exit.
7009 *
7010 * @returns VBox status code.
7011 * @param pVM Pointer to the VM.
7012 * @param pVCpu Pointer to the VMCPU.
7013 *
7014 * @remarks No-long-jump zone!!!
7015 */
7016static int hmR0VmxSaveHostState(PVM pVM, PVMCPU pVCpu)
7017{
7018 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7019
7020 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT))
7021 return VINF_SUCCESS;
7022
7023 int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
7024 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7025
7026 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
7027 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7028
7029 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
7030 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7031
7032 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
7033 return rc;
7034}
7035
7036
7037/**
7038 * Saves the host state in the VMCS host-state.
7039 *
7040 * @returns VBox status code.
7041 * @param pVM Pointer to the VM.
7042 * @param pVCpu Pointer to the VMCPU.
7043 *
7044 * @remarks No-long-jump zone!!!
7045 */
7046VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
7047{
7048 AssertPtr(pVM);
7049 AssertPtr(pVCpu);
7050
7051 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
7052
7053 /* When thread-context hooks are available, this is done later (when preemption/interrupts are disabled). */
7054 if (!VMMR0ThreadCtxHooksAreRegistered(pVCpu))
7055 {
7056 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7057 return hmR0VmxSaveHostState(pVM, pVCpu);
7058 }
7059 return VINF_SUCCESS;
7060}
7061
7062
7063/**
7064 * Loads the guest state into the VMCS guest-state area. The CPU state will be
7065 * loaded from these fields on every successful VM-entry.
7066 *
7067 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas.
7068 * Sets up the VM-entry controls.
7069 * Sets up the appropriate VMX non-root function to execute guest code based on
7070 * the guest CPU mode.
7071 *
7072 * @returns VBox status code.
7073 * @param pVM Pointer to the VM.
7074 * @param pVCpu Pointer to the VMCPU.
7075 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7076 * out-of-sync. Make sure to update the required fields
7077 * before using them.
7078 *
7079 * @remarks No-long-jump zone!!!
7080 */
7081static int hmR0VmxLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7082{
7083 AssertPtr(pVM);
7084 AssertPtr(pVCpu);
7085 AssertPtr(pMixedCtx);
7086 HMVMX_ASSERT_PREEMPT_SAFE();
7087
7088#ifdef LOG_ENABLED
7089 /** @todo r=ramshankar: I'm not able to use VMMRZCallRing3Disable() here,
7090 * probably not initialized yet? Anyway this will do for now.
7091 *
7092 * Update: Should be possible once VMXR0LoadGuestState() is removed as an
7093 * interface and disable ring-3 calls when thread-context hooks are not
7094 * available. */
7095 bool fCallerDisabledLogFlush = VMMR0IsLogFlushDisabled(pVCpu);
7096 VMMR0LogFlushDisable(pVCpu);
7097#endif
7098
7099 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
7100
7101 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
7102
7103 /* Determine real-on-v86 mode. */
7104 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
7105 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
7106 && CPUMIsGuestInRealModeEx(pMixedCtx))
7107 {
7108 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
7109 }
7110
7111 /*
7112 * Load the guest-state into the VMCS.
7113 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
7114 * Ideally, assert that the cross-dependent bits are up to date at the point of using it.
7115 */
7116 int rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
7117 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7118
7119 rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
7120 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7121
7122 rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
7123 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7124
7125 rc = hmR0VmxLoadGuestCR3AndCR4(pVCpu, pMixedCtx);
7126 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestCR3AndCR4: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7127
7128 /* Assumes CR0 is up-to-date (strict builds require CR0 for segment register validation checks). */
7129 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
7130 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7131
7132 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
7133 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7134
7135 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
7136 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7137
7138 /*
7139 * Loading Rflags here is fine, even though Rflags.TF might depend on guest debug state (which is not loaded here).
7140 * It is re-evaluated and updated if necessary in hmR0VmxLoadSharedState().
7141 */
7142 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
7143 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7144
7145 rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
7146 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7147
7148 /* Clear any unused and reserved bits. */
7149 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;
7150
7151#ifdef LOG_ENABLED
7152 /* Only reenable log-flushing if the caller has it enabled. */
7153 if (!fCallerDisabledLogFlush)
7154 VMMR0LogFlushEnable(pVCpu);
7155#endif
7156
7157 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
7158 return rc;
7159}
7160
7161
7162/**
7163 * Loads the state shared between the host and guest into the VMCS.
7164 *
7165 * @param pVM Pointer to the VM.
7166 * @param pVCpu Pointer to the VMCPU.
7167 * @param pCtx Pointer to the guest-CPU context.
7168 *
7169 * @remarks No-long-jump zone!!!
7170 */
7171static void hmR0VmxLoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
7172{
7173 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7174 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7175
7176 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
7177 {
7178 int rc = hmR0VmxLoadSharedCR0(pVCpu, pCtx);
7179 AssertRC(rc);
7180 }
7181
7182 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
7183 {
7184 int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx);
7185 AssertRC(rc);
7186
7187 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
7188 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
7189 {
7190 rc = hmR0VmxLoadGuestRflags(pVCpu, pCtx);
7191 AssertRC(rc);
7192 }
7193 }
7194
7195 AssertMsg(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE), ("fContextUseFlags=%#x\n",
7196 pVCpu->hm.s.fContextUseFlags));
7197}
7198
7199
7200/**
7201 * Worker for loading the guest-state bits in the inner VT-x execution loop.
7202 *
7203 * @param pVM Pointer to the VM.
7204 * @param pVCpu Pointer to the VMCPU.
7205 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7206 * out-of-sync. Make sure to update the required fields
7207 * before using them.
7208 */
7209DECLINLINE(void) hmR0VmxLoadGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7210{
7211 HMVMX_ASSERT_PREEMPT_SAFE();
7212
7213 Log5(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
7214#ifdef HMVMX_SYNC_FULL_GUEST_STATE
7215 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
7216#endif
7217
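   /* Fast path: if only the guest RIP was modified since the last VM-entry, write just that
      one VMCS field below instead of re-loading the entire guest state. */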
7218 if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)
7219 {
7220 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
7221 AssertRC(rc);
7222 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
7223 }
7224 else if (pVCpu->hm.s.fContextUseFlags)
7225 {
7226 int rc = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx);
7227 AssertRC(rc);
7228 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
7229 }
7230
7231 /* All the guest state bits should be loaded except maybe the host context and shared host/guest bits. */
7232 AssertMsg( !(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)
7233 || !(pVCpu->hm.s.fContextUseFlags & ~(HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)),
7234 ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
7235
7236#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
7237 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
7238 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
7239 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
7240#endif
7241}
7242
7243
7244/**
7245 * Does the preparations before executing guest code in VT-x.
7246 *
7247 * This may cause longjmps to ring-3 and may even result in rescheduling to the
7248 * recompiler. We must be cautious about committing guest-state information
7249 * into the VMCS on the assumption that we will definitely execute the guest
7250 * in VT-x. If we fall back to the recompiler after updating the VMCS and
7251 * clearing the common-state (TRPM/force-flags), we must undo those changes so
7252 * that the recompiler can (and should) use them when it resumes guest
7253 * execution. Otherwise, such operations must be done when we can no longer
7254 * exit to ring-3.
7255 *
7256 * @returns VBox status code (informational status codes included).
7257 * @retval VINF_SUCCESS if we can proceed with running the guest.
7258 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a double-fault
7259 * into the guest.
7260 * @retval VINF_* scheduling changes, we have to go back to ring-3.
7261 *
7262 * @param pVM Pointer to the VM.
7263 * @param pVCpu Pointer to the VMCPU.
7264 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7265 * out-of-sync. Make sure to update the required fields
7266 * before using them.
7267 * @param pVmxTransient Pointer to the VMX transient structure.
7268 *
7269 * @remarks Called with preemption disabled.
7270 */
7271static int hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7272{
7273 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7274
7275#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
7276 PGMRZDynMapFlushAutoSet(pVCpu);
7277#endif
7278
7279 /* Check force flag actions that might require us to go back to ring-3. */
7280 int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
7281 if (rc != VINF_SUCCESS)
7282 return rc;
7283
7284#ifndef IEM_VERIFICATION_MODE_FULL
7285    /* Set up the virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
7286 if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
7287 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
7288 {
7289 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
7290 RTGCPHYS GCPhysApicBase;
7291 GCPhysApicBase = pMixedCtx->msrApicBase;
7292 GCPhysApicBase &= PAGE_BASE_GC_MASK;
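         /* E.g. an APIC-base MSR value of 0xfee00900 (enable/status bits live in the low 12 bits)
            yields 0xfee00000 as the 4K-aligned address of the APIC-access page. */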
7293
7294 /* Unalias any existing mapping. */
7295 rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
7296 AssertRCReturn(rc, rc);
7297
7298 /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
7299       Log4(("Mapped HC APIC-access page into GC: GCPhysApicBase=%#RGp\n", GCPhysApicBase));
7300 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
7301 AssertRCReturn(rc, rc);
7302
7303 pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
7304 }
7305#endif /* !IEM_VERIFICATION_MODE_FULL */
7306
7307 /* Load the guest state bits, we can handle longjmps/getting preempted here. */
7308 hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
7309
7310 /*
7311 * Evaluate events as pending-for-injection into the guest. Toggling of force-flags here is safe as long as
7312 * we update TRPM on premature exits to ring-3 before executing guest code. We must NOT restore the force-flags.
7313 */
7314 if (TRPMHasTrap(pVCpu))
7315 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
7316 else if (!pVCpu->hm.s.Event.fPending)
7317 hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);
7318
7319 /*
7320 * No longjmps to ring-3 from this point on!!!
7321 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional, better than a kernel panic.
7322 * This also disables flushing of the R0-logger instance (if any).
7323 */
7324 VMMRZCallRing3Disable(pVCpu);
7325
7326 /*
7327 * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
7328 * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
7329 *
7330    * We need to check for force-flags that could've possibly been altered since we last checked them (e.g.
7331 * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
7332 *
7333 * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
7334 * executing guest code.
7335 */
7336 pVmxTransient->uEflags = ASMIntDisableFlags();
7337 if ( VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
7338 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
7339 {
7340 ASMSetFlags(pVmxTransient->uEflags);
7341 VMMRZCallRing3Enable(pVCpu);
7342 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
7343 return VINF_EM_RAW_TO_R3;
7344 }
7345 else if (RTThreadPreemptIsPending(NIL_RTTHREAD))
7346 {
7347 ASMSetFlags(pVmxTransient->uEflags);
7348 VMMRZCallRing3Enable(pVCpu);
7349 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
7350 return VINF_EM_RAW_INTERRUPT;
7351 }
7352
7353 /*
7354 * Event injection might result in triple-faulting the VM (real-on-v86 case), which is why it's
7355 * done here and not in hmR0VmxPreRunGuestCommitted() which doesn't expect failures.
7356 */
7357 rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx);
7358 if (RT_UNLIKELY(rc != VINF_SUCCESS))
7359 {
7360 ASMSetFlags(pVmxTransient->uEflags);
7361 VMMRZCallRing3Enable(pVCpu);
7362 return rc;
7363 }
7364
7365 return VINF_SUCCESS;
7366}
7367
7368
7369/**
7370 * Prepares to run guest code in VT-x once we have committed to doing so;
7371 * there is no backing out to ring-3 or anywhere else at this
7372 * point.
7373 *
7374 * @param pVM Pointer to the VM.
7375 * @param pVCpu Pointer to the VMCPU.
7376 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7377 * out-of-sync. Make sure to update the required fields
7378 * before using them.
7379 * @param pVmxTransient Pointer to the VMX transient structure.
7380 *
7381 * @remarks Called with preemption disabled.
7382 * @remarks No-long-jump zone!!!
7383 */
7384static void hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7385{
7386 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7387 Assert(VMMR0IsLogFlushDisabled(pVCpu));
7388 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7389
7390 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
7391 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
7392
7393 /*
7394 * Load the host state bits as we may've been preempted (only happens when
7395 * thread-context hooks are used).
7396 */
7397 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT)
7398 {
7399 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
7400 int rc = hmR0VmxSaveHostState(pVM, pVCpu);
7401 AssertRC(rc);
7402 }
7403 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
7404
7405 /*
7406 * If we are injecting events to a real-on-v86 mode guest, we may have to update
7407 * RIP and some other registers, i.e. hmR0VmxInjectPendingEvent()->hmR0VmxInjectEventVmcs().
7408  * Reload only the necessary state; the assertion will catch it if other parts of the code
7409 * change.
7410 */
7411 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
7412 {
7413 hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
7414 hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
7415 }
7416
7417 /* Load the state shared between host and guest (FPU, debug). */
7418 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE)
7419 hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
7420 AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
7421
7422 /*
7423 * Cache the TPR-shadow for checking on every VM-exit if it might have changed.
7424 */
7425 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
7426 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
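      /* Offset 0x80 into the virtual-APIC page is the TPR field; see Intel spec. 29.1.1 "Virtualized APIC Registers". */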
7427
7428 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
7429 || HMR0GetCurrentCpu()->idCpu != pVCpu->hm.s.idLastCpu)
7430 {
7431 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pMixedCtx);
7432 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
7433 }
7434
7435 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
7436 hmR0VmxFlushTaggedTlb(pVCpu); /* Invalidate the appropriate guest entries from the TLB. */
7437
7438 RTCPUID idCurrentCpu = HMR0GetCurrentCpu()->idCpu;
7439 Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
7440 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Update the error reporting info. with the current host CPU. */
7441
7442 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
7443
7444 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
7445 to start executing. */
7446
7447#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
7448 /*
7449     * Save the current host TSC_AUX and write the guest TSC_AUX to the host, so that
7450     * RDTSCP executed in the guest (when it doesn't cause a VM-exit) reads the guest MSR. See @bugref{3324}.
7451 */
7452 if ( (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
7453 && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
7454 {
7455 pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
7456       uint64_t u64GuestTscAux = 0;
7457       int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
7458       AssertRC(rc2);
7459       ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
7460 }
7461#endif
7462}
7463
7464
7465/**
7466 * Performs some essential restoration of state after running guest code in
7467 * VT-x.
7468 *
7469 * @param pVM Pointer to the VM.
7470 * @param pVCpu Pointer to the VMCPU.
7471 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7472 * out-of-sync. Make sure to update the required fields
7473 * before using them.
7474 * @param pVmxTransient Pointer to the VMX transient structure.
7475 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
7476 *
7477 * @remarks Called with interrupts disabled.
7478 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
7479 * unconditionally when it is safe to do so.
7480 */
7481static void hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
7482{
7483 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7484
7485 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
7486 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
7487 pVCpu->hm.s.vmx.fUpdatedGuestState = 0; /* Exits/longjmps to ring-3 requires saving the guest state. */
7488 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
7489 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
7490
7491 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
7492 {
7493#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
7494 /* Restore host's TSC_AUX. */
7495 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
7496 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
7497#endif
7498 /** @todo Find a way to fix hardcoding a guestimate. */
7499 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC()
7500 + pVCpu->hm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
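         /* I.e. report the TSC as the guest would have seen it just before the world switch:
            the current host TSC plus the VMCS TSC offset, minus ~0x400 ticks of switch overhead. */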
7501 }
7502
7503 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
7504 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
7505 Assert(!(ASMGetFlags() & X86_EFL_IF));
7506 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
7507
7508 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Host state messed up by VT-x, we must restore. */
7509 pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
7510 ASMSetFlags(pVmxTransient->uEflags); /* Enable interrupts. */
7511 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
7512
7513 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
7514 uint32_t uExitReason;
7515 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
7516 rc |= hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
7517 AssertRC(rc);
7518 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
7519 pVmxTransient->fVMEntryFailed = !!VMX_ENTRY_INTERRUPTION_INFO_VALID(pVmxTransient->uEntryIntrInfo);
7520
7521 /* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */
7522 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
7523 {
7524 Log4(("VM-entry failure: pVCpu=%p idCpu=%RU32 rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", pVCpu, pVCpu->idCpu, rcVMRun,
7525 pVmxTransient->fVMEntryFailed));
7526 return;
7527 }
7528
7529 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
7530 {
7531 /* Update the guest interruptibility-state from the VMCS. */
7532 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
7533#if defined(HMVMX_SYNC_FULL_GUEST_STATE) || defined(HMVMX_SAVE_FULL_GUEST_STATE)
7534 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
7535 AssertRC(rc);
7536#endif
7537 /*
7538 * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
7539     * we eventually get a VM-exit for any reason. This may be expensive as PDMApicSetTPR() can longjmp to ring-3, which is
7540     * why it's done here: it's easier and no less efficient to deal with it here than making hmR0VmxSaveGuestState()
7541 * cope with longjmps safely (see VMCPU_FF_HM_UPDATE_CR3 handling).
7542 */
7543 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
7544 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
7545 {
7546 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
7547 AssertRC(rc);
7548 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
7549 }
7550 }
7551}
7552
7553
7554
7555/**
7556 * Runs the guest code using VT-x the normal way.
7557 *
7558 * @returns VBox status code.
7559 * @param pVM Pointer to the VM.
7560 * @param pVCpu Pointer to the VMCPU.
7561 * @param pCtx Pointer to the guest-CPU context.
7562 *
7563 * @note Mostly the same as hmR0VmxRunGuestCodeStep.
7564 * @remarks Called with preemption disabled.
7565 */
7566static int hmR0VmxRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
7567{
7568 VMXTRANSIENT VmxTransient;
7569 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
7570 int rc = VERR_INTERNAL_ERROR_5;
7571 uint32_t cLoops = 0;
7572
7573 for (;; cLoops++)
7574 {
7575 Assert(!HMR0SuspendPending());
7576 HMVMX_ASSERT_CPU_SAFE();
7577
7578 /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
7579 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
7580 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
7581 if (rc != VINF_SUCCESS)
7582 break;
7583
7584 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
7585 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
7586 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
7587
7588 /* Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state. */
7589 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
7590
7591 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
7592 if (RT_UNLIKELY(rc != VINF_SUCCESS))
7593 {
7594 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
7595 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
7596 return rc;
7597 }
7598
7599 /* Handle the VM-exit. */
7600 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
7601 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
7602 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
7603 HMVMX_START_EXIT_DISPATCH_PROF();
7604#ifdef HMVMX_USE_FUNCTION_TABLE
7605 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
7606#else
7607 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
7608#endif
7609 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
7610 if (rc != VINF_SUCCESS)
7611 break;
7612 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
7613 {
7614 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
7615 rc = VINF_EM_RAW_INTERRUPT;
7616 break;
7617 }
7618 }
7619
7620 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
7621 return rc;
7622}
7623
7624
7625/**
7626 * Single steps guest code using VT-x.
7627 *
7628 * @returns VBox status code.
7629 * @param pVM Pointer to the VM.
7630 * @param pVCpu Pointer to the VMCPU.
7631 * @param pCtx Pointer to the guest-CPU context.
7632 *
7633 * @note Mostly the same as hmR0VmxRunGuestCodeNormal.
7634 * @remarks Called with preemption disabled.
7635 */
7636static int hmR0VmxRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
7637{
7638 VMXTRANSIENT VmxTransient;
7639 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
7640 int rc = VERR_INTERNAL_ERROR_5;
7641 uint32_t cLoops = 0;
7642 uint16_t uCsStart = pCtx->cs.Sel;
7643 uint64_t uRipStart = pCtx->rip;
7644
7645 for (;; cLoops++)
7646 {
7647 Assert(!HMR0SuspendPending());
7648 HMVMX_ASSERT_CPU_SAFE();
7649
7650 /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
7651 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
7652 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
7653 if (rc != VINF_SUCCESS)
7654 break;
7655
7656 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
7657 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
7658 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
7659
7660 /* Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state. */
7661 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
7662
7663 /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
7664 if (RT_UNLIKELY(rc != VINF_SUCCESS))
7665 {
7666 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
7667 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
7668 return rc;
7669 }
7670
7671 /* Handle the VM-exit. */
7672 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
7673 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
7674 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
7675 HMVMX_START_EXIT_DISPATCH_PROF();
7676#ifdef HMVMX_USE_FUNCTION_TABLE
7677 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
7678#else
7679 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
7680#endif
7681 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
7682 if (rc != VINF_SUCCESS)
7683 break;
7684 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
7685 {
7686 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
7687 rc = VINF_EM_RAW_INTERRUPT;
7688 break;
7689 }
7690
7691 /*
7692        * Did the RIP change? If so, consider it a single step.
7693 * Otherwise, make sure one of the TFs gets set.
7694 */
7695 int rc2 = hmR0VmxSaveGuestRip(pVCpu, pCtx);
7696 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pCtx);
7697 AssertRCReturn(rc2, rc2);
7698 if ( pCtx->rip != uRipStart
7699 || pCtx->cs.Sel != uCsStart)
7700 {
7701 rc = VINF_EM_DBG_STEPPED;
7702 break;
7703 }
7704 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
7705 }
7706
7707 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
7708 return rc;
7709}
7710
7711
7712/**
7713 * Runs the guest code using VT-x.
7714 *
7715 * @returns VBox status code.
7716 * @param pVM Pointer to the VM.
7717 * @param pVCpu Pointer to the VMCPU.
7718 * @param pCtx Pointer to the guest-CPU context.
7719 *
7720 * @remarks Called with preemption disabled.
7721 */
7722VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
7723{
7724 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7725 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL);
7726 HMVMX_ASSERT_PREEMPT_SAFE();
7727
7728 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pCtx);
7729
7730 int rc;
7731 if (!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu))
7732 rc = hmR0VmxRunGuestCodeNormal(pVM, pVCpu, pCtx);
7733 else
7734 rc = hmR0VmxRunGuestCodeStep(pVM, pVCpu, pCtx);
7735
7736 if (rc == VERR_EM_INTERPRETER)
7737 rc = VINF_EM_RAW_EMULATE_INSTR;
7738 else if (rc == VINF_EM_RESET)
7739 rc = VINF_EM_TRIPLE_FAULT;
7740
7741 hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rc);
7742 VMMRZCallRing3RemoveNotification(pVCpu);
7743 return rc;
7744}
7745
7746
7747#ifndef HMVMX_USE_FUNCTION_TABLE
7748DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
7749{
7750 int rc;
7751 switch (rcReason)
7752 {
7753 case VMX_EXIT_EPT_MISCONFIG: rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); break;
7754 case VMX_EXIT_EPT_VIOLATION: rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); break;
7755 case VMX_EXIT_IO_INSTR: rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); break;
7756 case VMX_EXIT_CPUID: rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); break;
7757 case VMX_EXIT_RDTSC: rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); break;
7758 case VMX_EXIT_RDTSCP: rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); break;
7759 case VMX_EXIT_APIC_ACCESS: rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); break;
7760 case VMX_EXIT_XCPT_OR_NMI: rc = hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient); break;
7761 case VMX_EXIT_MOV_CRX: rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); break;
7762 case VMX_EXIT_EXT_INT: rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); break;
7763 case VMX_EXIT_INT_WINDOW: rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); break;
7764 case VMX_EXIT_MWAIT: rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); break;
7765 case VMX_EXIT_MONITOR: rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); break;
7766 case VMX_EXIT_TASK_SWITCH: rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); break;
7767 case VMX_EXIT_PREEMPT_TIMER: rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); break;
7768 case VMX_EXIT_RDMSR: rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); break;
7769 case VMX_EXIT_WRMSR: rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); break;
7770 case VMX_EXIT_MOV_DRX: rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); break;
7771 case VMX_EXIT_TPR_BELOW_THRESHOLD: rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); break;
7772 case VMX_EXIT_HLT: rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); break;
7773 case VMX_EXIT_INVD: rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); break;
7774 case VMX_EXIT_INVLPG: rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); break;
7775 case VMX_EXIT_RSM: rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); break;
7776 case VMX_EXIT_MTF: rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); break;
7777 case VMX_EXIT_PAUSE: rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); break;
7778 case VMX_EXIT_XDTR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
7779 case VMX_EXIT_TR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
7780 case VMX_EXIT_WBINVD: rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); break;
7781 case VMX_EXIT_XSETBV: rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); break;
7782 case VMX_EXIT_RDRAND: rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); break;
7783 case VMX_EXIT_INVPCID: rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); break;
7784 case VMX_EXIT_GETSEC: rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); break;
7785 case VMX_EXIT_RDPMC: rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); break;
7786
7787 case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break;
7788 case VMX_EXIT_NMI_WINDOW: rc = hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient); break;
7789 case VMX_EXIT_INIT_SIGNAL: rc = hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient); break;
7790 case VMX_EXIT_SIPI: rc = hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient); break;
7791 case VMX_EXIT_IO_SMI: rc = hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient); break;
7792 case VMX_EXIT_SMI: rc = hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient); break;
7793 case VMX_EXIT_ERR_MSR_LOAD: rc = hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient); break;
7794 case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient); break;
7795 case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient); break;
7796
7797 case VMX_EXIT_VMCALL:
7798 case VMX_EXIT_VMCLEAR:
7799 case VMX_EXIT_VMLAUNCH:
7800 case VMX_EXIT_VMPTRLD:
7801 case VMX_EXIT_VMPTRST:
7802 case VMX_EXIT_VMREAD:
7803 case VMX_EXIT_VMRESUME:
7804 case VMX_EXIT_VMWRITE:
7805 case VMX_EXIT_VMXOFF:
7806 case VMX_EXIT_VMXON:
7807 case VMX_EXIT_INVEPT:
7808 case VMX_EXIT_INVVPID:
7809 case VMX_EXIT_VMFUNC:
7810 rc = hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
7811 break;
7812 default:
7813 rc = hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
7814 break;
7815 }
7816 return rc;
7817}
7818#endif
7819
7820#ifdef DEBUG
7821/* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
7822# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
7823 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
7824
7825# define HMVMX_ASSERT_PREEMPT_CPUID() \
7826 do \
7827 { \
7828 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
7829 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
7830 } while (0)
7831
7832# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
7833 do { \
7834 AssertPtr(pVCpu); \
7835 AssertPtr(pMixedCtx); \
7836 AssertPtr(pVmxTransient); \
7837 Assert(pVmxTransient->fVMEntryFailed == false); \
7838 Assert(ASMIntAreEnabled()); \
7839 HMVMX_ASSERT_PREEMPT_SAFE(); \
7840 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
7841 Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", pVCpu->idCpu)); \
7842 HMVMX_ASSERT_PREEMPT_SAFE(); \
7843 if (VMMR0IsLogFlushDisabled(pVCpu)) \
7844 HMVMX_ASSERT_PREEMPT_CPUID(); \
7845 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
7846 } while (0)
7847
7848# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
7849 do { \
7850 Log4Func(("\n")); \
7851 } while(0)
7852#else /* Release builds */
7853# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() do { HMVMX_STOP_EXIT_DISPATCH_PROF(); } while(0)
7854# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while(0)
7855#endif
7856
7857
7858/**
7859 * Advances the guest RIP after reading it from the VMCS.
7860 *
7861 * @returns VBox status code.
7862 * @param pVCpu Pointer to the VMCPU.
7863 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7864 * out-of-sync. Make sure to update the required fields
7865 * before using them.
7866 * @param pVmxTransient Pointer to the VMX transient structure.
7867 *
7868 * @remarks No-long-jump zone!!!
7869 */
7870DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7871{
7872 int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7873 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
7874 AssertRCReturn(rc, rc);
7875
7876 pMixedCtx->rip += pVmxTransient->cbInstr;
7877 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7878 return rc;
7879}
7880
7881
7882/**
7883 * Tries to determine what part of the guest-state VT-x has deemed invalid
7884 * and updates the error record fields accordingly.
7885 *
7886 * @return VMX_IGS_* return codes.
7887 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
7888 * wrong with the guest state.
7889 *
7890 * @param pVM Pointer to the VM.
7891 * @param pVCpu Pointer to the VMCPU.
7892 * @param pCtx Pointer to the guest-CPU state.
7893 */
7894static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
7895{
7896#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
7897#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \
7898 uError = (err); \
7899 break; \
7900 } else do {} while (0)
7901/* Duplicate of IEM_IS_CANONICAL(). */
7902#define HMVMX_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
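/* The biased compare folds both canonical ranges into one: e.g. 0xffff800000000000 + 0x800000000000
   wraps to 0, which is < 2^48 (canonical), whereas the non-canonical 0x0000800000000000 sums to
   exactly 0x1000000000000 and fails the '<' test. */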
7903
7904 int rc;
7905 uint64_t u64Val;
7906 uint32_t u32Val;
7907 uint32_t uError = VMX_IGS_ERROR;
7908 bool fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
7909
7910 do
7911 {
7912 /*
7913 * CR0.
7914 */
7915 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.u64Cr0Fixed0 & pVM->hm.s.vmx.msr.u64Cr0Fixed1);
7916 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.u64Cr0Fixed0 | pVM->hm.s.vmx.msr.u64Cr0Fixed1);
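      /* Per the VMX fixed-bit MSRs: CR0 bits set in FIXED0 must be 1 and bits clear in FIXED1 must be 0.
         So a valid CR0 satisfies (cr0 & uSetCR0) == uSetCR0 and (cr0 & ~uZapCR0) == 0, as checked below. */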
7917 /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG).
7918       See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
7919 if (fUnrestrictedGuest)
7920 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
7921
7922 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val);
7923 AssertRCBreak(rc);
7924 HMVMX_CHECK_BREAK((u32Val & uSetCR0) == uSetCR0, VMX_IGS_CR0_FIXED1);
7925 HMVMX_CHECK_BREAK(!(u32Val & ~uZapCR0), VMX_IGS_CR0_FIXED0);
7926 if ( !fUnrestrictedGuest
7927 && (u32Val & X86_CR0_PG)
7928 && !(u32Val & X86_CR0_PE))
7929 {
7930 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
7931 }
7932
7933 /*
7934 * CR4.
7935 */
7936 uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.u64Cr4Fixed0 & pVM->hm.s.vmx.msr.u64Cr4Fixed1);
7937 uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.u64Cr4Fixed0 | pVM->hm.s.vmx.msr.u64Cr4Fixed1);
7938 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val);
7939 AssertRCBreak(rc);
7940 HMVMX_CHECK_BREAK((u32Val & uSetCR4) == uSetCR4, VMX_IGS_CR4_FIXED1);
7941 HMVMX_CHECK_BREAK(!(u32Val & ~uZapCR4), VMX_IGS_CR4_FIXED0);
7942
7943 /*
7944 * IA32_DEBUGCTL MSR.
7945 */
7946 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
7947 AssertRCBreak(rc);
7948 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
7949 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
7950 {
7951 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
7952 }
7953 uint64_t u64DebugCtlMsr = u64Val;
7954
7955#ifdef VBOX_STRICT
7956 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
7957 AssertRCBreak(rc);
7958 Assert(u32Val == pVCpu->hm.s.vmx.u32ProcCtls);
7959#endif
7960       bool const fLongModeGuest = !!(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
7961
7962 /*
7963 * RIP and RFLAGS.
7964 */
7965 uint32_t u32Eflags;
7966#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
7967 if (HMVMX_IS_64BIT_HOST_MODE())
7968 {
7969 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
7970 AssertRCBreak(rc);
7971 /* pCtx->rip can be different than the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
7972 if ( !fLongModeGuest
7973 || !pCtx->cs.Attr.n.u1Long)
7974 {
7975 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
7976 }
7977 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
7978 * must be identical if the "IA32e mode guest" VM-entry control is 1
7979 * and CS.L is 1. No check applies if the CPU supports 64
7980 * linear-address bits. */
7981
7982 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
7983 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
7984 AssertRCBreak(rc);
7985 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
7986 VMX_IGS_RFLAGS_RESERVED);
7987 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
7988 u32Eflags = u64Val;
7989 }
7990 else
7991#endif
7992 {
7993 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
7994 AssertRCBreak(rc);
7995 HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED); /* Bit 31:22, Bit 15, 5, 3 MBZ. */
7996 HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
7997 }
7998
7999 if ( fLongModeGuest
8000 || !(pCtx->cr0 & X86_CR0_PE))
8001 {
8002 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
8003 }
8004
8005 uint32_t u32EntryInfo;
8006 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
8007 AssertRCBreak(rc);
8008 if ( VMX_ENTRY_INTERRUPTION_INFO_VALID(u32EntryInfo)
8009 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
8010 {
8011          HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
8012 }
8013
8014 /*
8015 * 64-bit checks.
8016 */
8017#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
8018 if (HMVMX_IS_64BIT_HOST_MODE())
8019 {
8020 if ( fLongModeGuest
8021 && !fUnrestrictedGuest)
8022 {
8023 HMVMX_CHECK_BREAK(CPUMIsGuestPagingEnabledEx(pCtx), VMX_IGS_CR0_PG_LONGMODE);
8024 HMVMX_CHECK_BREAK((pCtx->cr4 & X86_CR4_PAE), VMX_IGS_CR4_PAE_LONGMODE);
8025 }
8026
8027 if ( !fLongModeGuest
8028 && (pCtx->cr4 & X86_CR4_PCIDE))
8029 {
8030 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
8031 }
8032
8033 /** @todo CR3 field must be such that bits 63:52 and bits in the range
8034 * 51:32 beyond the processor's physical-address width are 0. */
8035
8036 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
8037 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
8038 {
8039 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
8040 }
8041
8042 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
8043 AssertRCBreak(rc);
8044 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
8045
8046 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
8047 AssertRCBreak(rc);
8048 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
8049 }
8050#endif
8051
8052 /*
8053 * PERF_GLOBAL MSR.
8054 */
8055 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR)
8056 {
8057 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
8058 AssertRCBreak(rc);
8059 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
8060 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
8061 }
8062
8063 /*
8064 * PAT MSR.
8065 */
8066 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR)
8067 {
8068 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
8069 AssertRCBreak(rc);
8070          HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each entry MBZ. */
8071          for (unsigned i = 0; i < 8; i++)
8072          {
8073              uint8_t u8Val = (u64Val & 0xff);   /* Each of the eight PAT entries occupies one byte. */
8074              if (   u8Val != 0 /* UC */
8075                  && u8Val != 1 /* WC */
8076                  && u8Val != 4 /* WT */
8077                  && u8Val != 5 /* WP */
8078                  && u8Val != 6 /* WB */
8079                  && u8Val != 7 /* UC- */)
8080              {
8081                  HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
8082              }
8083              u64Val >>= 8;
8084          }
8085 }
8086
8087 /*
8088 * EFER MSR.
8089 */
8090 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
8091 {
8092 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
8093 AssertRCBreak(rc);
8094 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
8095 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
8096 HMVMX_CHECK_BREAK((u64Val & MSR_K6_EFER_LMA) == (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
8097 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
8098 HMVMX_CHECK_BREAK( fUnrestrictedGuest
8099 || (u64Val & MSR_K6_EFER_LMA) == (pCtx->cr0 & X86_CR0_PG), VMX_IGS_EFER_LMA_PG_MISMATCH);
8100 }
8101
8102 /*
8103 * Segment registers.
8104 */
8105 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
8106 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
8107 if (!(u32Eflags & X86_EFL_VM))
8108 {
8109 /* CS */
8110 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
8111 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
8112 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
8113 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
8114 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
8115 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
8116 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
8117 /* CS cannot be loaded with NULL in protected mode. */
8118 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
8119 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
8120 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
8121 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
8122 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
8123 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
8124 else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
8125 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
8126 else
8127 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
8128
8129 /* SS */
8130 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
8131 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
8132 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
8133 if ( !(pCtx->cr0 & X86_CR0_PE)
8134 || pCtx->cs.Attr.n.u4Type == 3)
8135 {
8136 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
8137 }
8138 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
8139 {
8140 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
8141 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
8142 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
8143 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
8144 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
8145 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
8146 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
8147 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
8148 }
8149
8150 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
8151 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
8152 {
8153 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
8154 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
8155 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
8156 || pCtx->ds.Attr.n.u4Type > 11
8157 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
8158 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
8159 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
8160 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
8161 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
8162 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
8163 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
8164 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
8165 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
8166 }
8167 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
8168 {
8169 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
8170 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
8171 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
8172 || pCtx->es.Attr.n.u4Type > 11
8173 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
8174 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
8175 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
8176 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
8177 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
8178 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
8179 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
8180 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
8181 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
8182 }
8183 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
8184 {
8185 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
8186 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
8187 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
8188 || pCtx->fs.Attr.n.u4Type > 11
8189 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
8190 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
8191 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
8192 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
8193 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
8194 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
8195 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
8196 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
8197 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
8198 }
8199 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
8200 {
8201 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
8202 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
8203 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
8204 || pCtx->gs.Attr.n.u4Type > 11
8205 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
8206 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
8207 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
8208 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
8209 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
8210 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
8211 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
8212 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
8213 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
8214 }
8215 /* 64-bit capable CPUs. */
8216#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
8217 if (HMVMX_IS_64BIT_HOST_MODE())
8218 {
8219 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
8220 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
8221 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
8222 || HMVMX_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
8223 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
8224 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
8225 VMX_IGS_LONGMODE_SS_BASE_INVALID);
8226 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
8227 VMX_IGS_LONGMODE_DS_BASE_INVALID);
8228 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
8229 VMX_IGS_LONGMODE_ES_BASE_INVALID);
8230 }
8231#endif
8232 }
8233 else
8234 {
8235 /* V86 mode checks. */
8236 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
8237 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
8238 {
8239 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
8240 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
8241 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
8242 }
8243 else
8244 {
8245 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
8246 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
8247 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
8248 }
8249
8250 /* CS */
8251 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
8252 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
8253 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
8254 /* SS */
8255 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
8256 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
8257 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
8258 /* DS */
8259 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
8260 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
8261 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
8262 /* ES */
8263 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
8264 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
8265 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
8266 /* FS */
8267 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
8268 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
8269 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
8270 /* GS */
8271 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
8272 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
8273 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
8274 /* 64-bit capable CPUs. */
8275#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
8276 if (HMVMX_IS_64BIT_HOST_MODE())
8277 {
8278 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
8279 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
8280 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
8281 || HMVMX_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
8282 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
8283 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
8284 VMX_IGS_LONGMODE_SS_BASE_INVALID);
8285 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
8286 VMX_IGS_LONGMODE_DS_BASE_INVALID);
8287 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
8288 VMX_IGS_LONGMODE_ES_BASE_INVALID);
8289 }
8290#endif
8291 }
8292
8293 /*
8294 * TR.
8295 */
8296 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
8297 /* 64-bit capable CPUs. */
8298#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
8299 if (HMVMX_IS_64BIT_HOST_MODE())
8300 {
8301 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
8302 }
8303#endif
8304 if (fLongModeGuest)
8305 {
8306 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
8307 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
8308 }
8309 else
8310 {
8311 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
8312 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
8313 VMX_IGS_TR_ATTR_TYPE_INVALID);
8314 }
8315 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
8316 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
8317 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
8318 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
8319 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
8320 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
8321 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
8322 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
8323
8324 /*
8325 * GDTR and IDTR.
8326 */
8327#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
8328 if (HMVMX_IS_64BIT_HOST_MODE())
8329 {
8330 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
8331 AssertRCBreak(rc);
8332 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
8333
8334 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
8335 AssertRCBreak(rc);
8336 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
8337 }
8338#endif
8339
8340 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
8341 AssertRCBreak(rc);
8342 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
8343
8344 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
8345 AssertRCBreak(rc);
8346 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
8347
8348 /*
8349 * Guest Non-Register State.
8350 */
8351 /* Activity State. */
8352 uint32_t u32ActivityState;
8353 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
8354 AssertRCBreak(rc);
8355 HMVMX_CHECK_BREAK( !u32ActivityState
8356 || (u32ActivityState & MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.msr.u64Misc)),
8357 VMX_IGS_ACTIVITY_STATE_INVALID);
8358 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
8359 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
8360 uint32_t u32IntrState;
8361 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32IntrState);
8362 AssertRCBreak(rc);
8363 if ( u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
8364 || u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
8365 {
8366 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
8367 }
8368
8369 /** @todo Activity state and injecting interrupts. Left as a todo since we
8370 * currently don't use activity states but ACTIVE. */
8371
8372 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
8373 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
8374
8375 /* Guest interruptibility-state. */
8376 HMVMX_CHECK_BREAK(!(u32IntrState & 0xfffffff0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
8377 HMVMX_CHECK_BREAK((u32IntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
8378 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS))
8379 != ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
8380 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
8381 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
8382 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
8383 || !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
8384 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
8385 if (VMX_ENTRY_INTERRUPTION_INFO_VALID(u32EntryInfo))
8386 {
8387 if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
8388 {
8389 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
8390 && !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
8391 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
8392 }
8393 else if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
8394 {
8395 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
8396 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
8397 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
8398 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
8399 }
8400 }
8401 /** @todo Assumes the processor is not in SMM. */
8402 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
8403 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
8404 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
8405 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
8406 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
8407 if ( (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
8408 && VMX_ENTRY_INTERRUPTION_INFO_VALID(u32EntryInfo)
8409 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
8410 {
8411 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI),
8412 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
8413 }
8414
8415 /* Pending debug exceptions. */
8416 if (HMVMX_IS_64BIT_HOST_MODE())
8417 {
8418 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u64Val);
8419 AssertRCBreak(rc);
8420 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
8421 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
8422          u32Val = u64Val; /* For the pending debug exception checks below. */
8423 }
8424 else
8425 {
8426 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u32Val);
8427 AssertRCBreak(rc);
8428 /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
8429          HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
8430 }
8431
8432 if ( (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
8433 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)
8434 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
8435 {
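         /* Intel spec. 26.3.1.5: if single-stepping is in effect (EFLAGS.TF=1 and DEBUGCTL.BTF=0)
            while interrupts are inhibited by STI/MOV-SS blocking or the guest is in the HLT activity
            state, the pending debug-exception BS bit must be 1; otherwise it must be 0. */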
8436 if ( (u32Eflags & X86_EFL_TF)
8437 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
8438 {
8439 /* Bit 14 is PendingDebug.BS. */
8440 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
8441 }
8442 if ( !(u32Eflags & X86_EFL_TF)
8443 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
8444 {
8445 /* Bit 14 is PendingDebug.BS. */
8446 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
8447 }
8448 }
8449
8450 /* VMCS link pointer. */
8451 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
8452 AssertRCBreak(rc);
8453 if (u64Val != UINT64_C(0xffffffffffffffff))
8454 {
8455 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
8456 /** @todo Bits beyond the processor's physical-address width MBZ. */
8457 /** @todo 32-bit located in memory referenced by value of this field (as a
8458 * physical address) must contain the processor's VMCS revision ID. */
8459 /** @todo SMM checks. */
8460 }
8461
8462 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries. */
8463
8464 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
8465 if (uError == VMX_IGS_ERROR)
8466 uError = VMX_IGS_REASON_NOT_FOUND;
8467 } while (0);
8468
8469 pVCpu->hm.s.u32HMError = uError;
8470 return uError;
8471
8472#undef HMVMX_ERROR_BREAK
8473#undef HMVMX_CHECK_BREAK
8474#undef HMVMX_IS_CANONICAL
8475}
8476
8477/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8478/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
8479/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8480
8481/** @name VM-exit handlers.
8482 * @{
8483 */
8484
8485/**
8486 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
8487 */
8488HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8489{
8490 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8491 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
8492 /* 32-bit Windows hosts (with 4 cores) have trouble with this; it causes higher interrupt latency. */
8493#if HC_ARCH_BITS == 64
8494 Assert(ASMIntAreEnabled());
8495 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUsePreemptTimer)
8496 return VINF_SUCCESS;
8497#endif
8498 return VINF_EM_RAW_INTERRUPT;
8499}
8500
8501
8502/**
8503 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
8504 */
8505HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8506{
8507 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8508 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
8509
8510 int rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8511 AssertRCReturn(rc, rc);
8512
8513 uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntrInfo);
8514 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)
8515 && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
8516 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntrInfo));
8517
8518 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
8519 {
8520 /*
8521 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we injected it ourselves, and
8522 * anything we inject will not cause a VM-exit directly for the event being injected.
8523 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
8524 *
8525 * Dispatch the NMI to the host. See Intel spec. 27.5.5 "Updating Non-Register State".
8526 */
8527 VMXDispatchHostNmi();
8528 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmi);
8529 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
8530 return VINF_SUCCESS;
8531 }
8532
8533 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8534 rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8535 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
8536 {
8537 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
8538 return VINF_SUCCESS;
8539 }
8540 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8541 {
8542 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
8543 return rc;
8544 }
8545
8546 uint32_t uExitIntrInfo = pVmxTransient->uExitIntrInfo;
8547 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntrInfo);
8548 switch (uIntrType)
8549 {
8550 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
8551 Assert(uVector == X86_XCPT_DB || uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
8552 /* no break */
8553 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
8554 {
8555 switch (uVector)
8556 {
8557 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
8558 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
8559 case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
8560 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
8561 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
8562 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
8563#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
8564 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
8565 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
8566 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
8567 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
8568 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
8569 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
8570 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
8571 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
8572 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
8573 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
8574#endif
8575 default:
8576 {
8577 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8578 AssertRCReturn(rc, rc);
8579
8580 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
8581 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
8582 {
8583 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
8584 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
8585 rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8586 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8587 AssertRCReturn(rc, rc);
8588 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntrInfo),
8589 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode,
8590 0 /* GCPtrFaultAddress */);
8591 AssertRCReturn(rc, rc);
8592 }
8593 else
8594 {
8595 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
8596 pVCpu->hm.s.u32HMError = uVector;
8597 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
8598 }
8599 break;
8600 }
8601 }
8602 break;
8603 }
8604
8605 default:
8606 {
8607 pVCpu->hm.s.u32HMError = uExitIntrInfo;
8608 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE;
8609 AssertMsgFailed(("Unexpected interruption code %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntrInfo)));
8610 break;
8611 }
8612 }
8613 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
8614 return rc;
8615}
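
/*
 * A minimal sketch (hypothetical helper, not in the original sources) of the
 * exit-interruption-info field decoding consumed by the handler above; see the Intel
 * spec. on information for VM-exits due to vectored events.
 */
DECLINLINE(void) hmVmxDecodeExitIntInfo(uint32_t uExitIntrInfo, uint32_t *puVector, uint32_t *puType, bool *pfValid)
{
    *puVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntrInfo);            /* Bits 7:0. */
    *puType   = VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntrInfo);              /* Bits 10:8. */
    *pfValid  = RT_BOOL(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(uExitIntrInfo)); /* Bit 31. */
}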
8616
8617
8618/**
8619 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
8620 */
8621HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8622{
8623 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8624
8625 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is ready now. */
8626 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
8627 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
8628 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
8629 AssertRCReturn(rc, rc);
8630
8631 /* Deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and resume guest execution. */
8632 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
8633 return VINF_SUCCESS;
8634}
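
/*
 * Sketch of the one-shot control-toggling pattern used above (and again in
 * hmR0VmxExitMtf() and hmR0VmxExitMovDRx() below): clear a processor-based execution
 * control once it has served its purpose and write the result back into the VMCS.
 * The helper name is hypothetical.
 */
DECLINLINE(int) hmVmxClearProcCtls(PVMCPU pVCpu, uint32_t fCtls)
{
    Assert(pVCpu->hm.s.vmx.u32ProcCtls & fCtls);
    pVCpu->hm.s.vmx.u32ProcCtls &= ~fCtls;
    return VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
}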
8635
8636
8637/**
8638 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
8639 */
8640HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8641{
8642 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8643 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
8644 pVCpu->hm.s.u32HMError = VMX_EXIT_NMI_WINDOW;
8645 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8646}
8647
8648
8649/**
8650 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
8651 */
8652HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8653{
8654 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8655 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
8656 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8657}
8658
8659
8660/**
8661 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
8662 */
8663HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8664{
8665 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8666 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
8667 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8668}
8669
8670
8671/**
8672 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
8673 */
8674HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8675{
8676 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8677 PVM pVM = pVCpu->CTX_SUFF(pVM);
8678 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8679 if (RT_LIKELY(rc == VINF_SUCCESS))
8680 {
8681 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8682 Assert(pVmxTransient->cbInstr == 2);
8683 }
8684 else
8685 {
8686 AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
8687 rc = VERR_EM_INTERPRETER;
8688 }
8689 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
8690 return rc;
8691}
8692
8693
8694/**
8695 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
8696 */
8697HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8698{
8699 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8700 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
8701 AssertRCReturn(rc, rc);
8702
8703 if (pMixedCtx->cr4 & X86_CR4_SMXE)
8704 return VINF_EM_RAW_EMULATE_INSTR;
8705
8706 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
8707 pVCpu->hm.s.u32HMError = VMX_EXIT_GETSEC;
8708 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8709}
8710
8711
8712/**
8713 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
8714 */
8715HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8716{
8717 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8718 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
8719 AssertRCReturn(rc, rc);
8720
8721 PVM pVM = pVCpu->CTX_SUFF(pVM);
8722 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8723 if (RT_LIKELY(rc == VINF_SUCCESS))
8724 {
8725 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8726 Assert(pVmxTransient->cbInstr == 2);
8727 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
8728 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
8729 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
8730 }
8731 else
8732 {
8733 AssertMsgFailed(("hmR0VmxExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc));
8734 rc = VERR_EM_INTERPRETER;
8735 }
8736 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
8737 return rc;
8738}
8739
8740
8741/**
8742 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
8743 */
8744HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8745{
8746 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8747 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
8748 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
8749 AssertRCReturn(rc, rc);
8750
8751 PVM pVM = pVCpu->CTX_SUFF(pVM);
8752 rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
8753 if (RT_LIKELY(rc == VINF_SUCCESS))
8754 {
8755 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8756 Assert(pVmxTransient->cbInstr == 3);
8757 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
8758 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
8759 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
8760 }
8761 else
8762 {
8763 AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
8764 rc = VERR_EM_INTERPRETER;
8765 }
8766 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
8767 return rc;
8768}
8769
8770
8771/**
8772 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
8773 */
8774HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8775{
8776 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8777 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
8778 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
8779 AssertRCReturn(rc, rc);
8780
8781 PVM pVM = pVCpu->CTX_SUFF(pVM);
8782 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8783 if (RT_LIKELY(rc == VINF_SUCCESS))
8784 {
8785 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8786 Assert(pVmxTransient->cbInstr == 2);
8787 }
8788 else
8789 {
8790 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
8791 rc = VERR_EM_INTERPRETER;
8792 }
8793 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
8794 return rc;
8795}
8796
8797
8798/**
8799 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
8800 */
8801HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8802{
8803 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8804 PVM pVM = pVCpu->CTX_SUFF(pVM);
8805 Assert(!pVM->hm.s.fNestedPaging);
8806
8807 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8808 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8809 AssertRCReturn(rc, rc);
8810
8811 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
8812 rc = VBOXSTRICTRC_VAL(rc2);
8813 if (RT_LIKELY(rc == VINF_SUCCESS))
8814 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8815 else
8816 {
8817 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RX64 failed with %Rrc\n",
8818 pVmxTransient->uExitQualification, rc));
8819 }
8820 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
8821 return rc;
8822}
8823
8824
8825/**
8826 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
8827 */
8828HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8829{
8830 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8831 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8832 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8833 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8834 AssertRCReturn(rc, rc);
8835
8836 PVM pVM = pVCpu->CTX_SUFF(pVM);
8837 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8838 if (RT_LIKELY(rc == VINF_SUCCESS))
8839 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8840 else
8841 {
8842 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
8843 rc = VERR_EM_INTERPRETER;
8844 }
8845 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
8846 return rc;
8847}
8848
8849
8850/**
8851 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
8852 */
8853HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8854{
8855 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8856 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8857 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8858 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8859 AssertRCReturn(rc, rc);
8860
8861 PVM pVM = pVCpu->CTX_SUFF(pVM);
8862 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8863 rc = VBOXSTRICTRC_VAL(rc2);
8864 if (RT_LIKELY( rc == VINF_SUCCESS
8865 || rc == VINF_EM_HALT))
8866 {
8867 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8868 AssertRCReturn(rc3, rc3);
8869
8870 if ( rc == VINF_EM_HALT
8871 && EMShouldContinueAfterHalt(pVCpu, pMixedCtx))
8872 {
8873 rc = VINF_SUCCESS;
8874 }
8875 }
8876 else
8877 {
8878 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
8879 rc = VERR_EM_INTERPRETER;
8880 }
8881 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
8882 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
8883 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
8884 return rc;
8885}
8886
8887
8888/**
8889 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
8890 */
8891HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8892{
8893 /*
8894 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
8895 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
8896 * executing VMCALL in VMX root operation. If we get here, something funny is going on.
8897 * See Intel spec. "33.15.5 Enabling the Dual-Monitor Treatment".
8898 */
8899 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8900 pVCpu->hm.s.u32HMError = VMX_EXIT_RSM;
8901 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8902}
8903
8904
8905/**
8906 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
8907 */
8908HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8909{
8910 /*
8911 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
8912 * root operation. Only an STM (SMM transfer monitor) would get this exit when we (the executive monitor) execute a VMCALL
8913 * in VMX root mode or receive an SMI. If we get here, something funny is going on.
8914 * See Intel spec. "33.15.6 Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits"
8915 */
8916 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8917 pVCpu->hm.s.u32HMError = VMX_EXIT_SMI;
8918 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8919}
8920
8921
8922/**
8923 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
8924 */
8925HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8926{
8927 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
8928 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8929 pVCpu->hm.s.u32HMError = VMX_EXIT_IO_SMI;
8930 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8931}
8932
8933
8934/**
8935 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
8936 */
8937HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8938{
8939 /*
8940 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
8941 * don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
8942 * See Intel spec. 25.3 "Other Causes of VM-exits".
8943 */
8944 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8945 pVCpu->hm.s.u32HMError = VMX_EXIT_SIPI;
8946 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8947}
8948
8949
8950/**
8951 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
8952 * VM-exit.
8953 */
8954HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8955{
8956 /*
8957 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8958 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery" and Intel spec. 29.3 "VMX Instructions" for "VMXON".
8959 *
8960 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these VM-exits.
8961 * See Intel spec. "23.8 Restrictions on VMX operation".
8962 */
8963 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8964 return VINF_SUCCESS;
8965}
8966
8967
8968/**
8969 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8970 * VM-exit.
8971 */
8972HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8973{
8974 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8975 return VINF_EM_RESET;
8976}
8977
8978
8979/**
8980 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8981 */
8982HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8983{
8984 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8985 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
8986 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
8987 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8988 AssertRCReturn(rc, rc);
8989
8990 pMixedCtx->rip++;
8991 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
8992 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
8993 rc = VINF_SUCCESS;
8994 else
8995 rc = VINF_EM_HALT;
8996
8997 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
8998 return rc;
8999}
9000
9001
9002/**
9003 * VM-exit handler for instructions that result in a #UD exception delivered to
9004 * the guest.
9005 */
9006HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9007{
9008 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9009 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
9010 return VINF_SUCCESS;
9011}
9012
9013
9014/**
9015 * VM-exit handler for expiry of the VMX preemption timer.
9016 */
9017HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9018{
9019 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9020
9021 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
9022 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
9023
9024 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
9025 PVM pVM = pVCpu->CTX_SUFF(pVM);
9026 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
9027 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
9028 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
9029}
9030
9031
9032/**
9033 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
9034 */
9035HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9036{
9037 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9038
9039 /* We expose XSETBV to the guest; fall back to the recompiler for emulation. */
9040 /** @todo check if XSETBV is supported by the recompiler. */
9041 return VERR_EM_INTERPRETER;
9042}
9043
9044
9045/**
9046 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
9047 */
9048HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9049{
9050 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9051
9052 /* The guest should not invalidate the host CPU's TLBs; fall back to the recompiler. */
9053 /** @todo implement EMInterpretInvpcid() */
9054 return VERR_EM_INTERPRETER;
9055}
9056
9057
9058/**
9059 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
9060 * Error VM-exit.
9061 */
9062HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9063{
9064 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9065 AssertRCReturn(rc, rc);
9066
9067 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
9068 NOREF(uInvalidReason);
9069
9070#ifdef VBOX_STRICT
9071 uint32_t uIntrState;
9072 HMVMXHCUINTREG uHCReg;
9073 uint64_t u64Val;
9074 uint32_t u32Val;
9075
9076 rc = hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
9077 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
9078 rc |= hmR0VmxReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
9079 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
9080 AssertRCReturn(rc, rc);
9081
9082 Log4(("uInvalidReason %u\n", uInvalidReason));
9083 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntrInfo));
9084 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
9085 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
9086 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
9087
9088 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
9089 Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
9090 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
9091 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
9092 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
9093 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
9094 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
9095 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
9096 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
9097 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
9098 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
9099 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
9100#endif
9101
9102 PVM pVM = pVCpu->CTX_SUFF(pVM);
9103 HMDumpRegs(pVM, pVCpu, pMixedCtx);
9104
9105 return VERR_VMX_INVALID_GUEST_STATE;
9106}
9107
9108
9109/**
9110 * VM-exit handler for VM-entry failure due to an MSR-load
9111 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
9112 */
9113HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9114{
9115 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
9116 return VERR_VMX_UNEXPECTED_EXIT_CODE;
9117}
9118
9119
9120/**
9121 * VM-exit handler for VM-entry failure due to a machine-check event
9122 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
9123 */
9124HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9125{
9126 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
9127 return VERR_VMX_UNEXPECTED_EXIT_CODE;
9128}
9129
9130
9131/**
9132 * VM-exit handler for all undefined reasons. Should never ever happen.. in
9133 * theory.
9134 */
9135HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9136{
9137 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
9138 return VERR_VMX_UNDEFINED_EXIT_CODE;
9139}
9140
9141
9142/**
9143 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
9144 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
9145 * Conditional VM-exit.
9146 */
9147HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9148{
9149 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9150
9151 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
9152 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
9153 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
9154 return VERR_EM_INTERPRETER;
9155 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
9156 return VERR_VMX_UNEXPECTED_EXIT_CODE;
9157}
9158
9159
9160/**
9161 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
9162 */
9163HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9164{
9165 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9166
9167 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
9168 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
9169 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
9170 return VERR_EM_INTERPRETER;
9171 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
9172 return VERR_VMX_UNEXPECTED_EXIT_CODE;
9173}
9174
9175
9176/**
9177 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
9178 */
9179HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9180{
9181 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9182
9183 /* EMInterpretRdmsr() requires CR0, EFLAGS and the SS segment register. */
9184 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
9185 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
9186 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9187 AssertRCReturn(rc, rc);
9188 Log4(("CS:RIP=%04x:%#RX64 ECX=%X\n", pMixedCtx->cs.Sel, pMixedCtx->rip, pMixedCtx->ecx));
9189
9190 PVM pVM = pVCpu->CTX_SUFF(pVM);
9191 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
9192 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
9193 ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
9194 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
9195
9196 if (RT_LIKELY(rc == VINF_SUCCESS))
9197 {
9198 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9199 Assert(pVmxTransient->cbInstr == 2);
9200 }
9201 return rc;
9202}
9203
9204
9205/**
9206 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
9207 */
9208HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9209{
9210 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9211 PVM pVM = pVCpu->CTX_SUFF(pVM);
9212 int rc = VINF_SUCCESS;
9213
9214 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
9215 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
9216 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
9217 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9218 AssertRCReturn(rc, rc);
9219 Log4(("ecx=%#RX32\n", pMixedCtx->ecx));
9220
9221 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
9222 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
9223 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
9224
9225 if (RT_LIKELY(rc == VINF_SUCCESS))
9226 {
9227 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9228
9229 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
9230 if ( pMixedCtx->ecx >= MSR_IA32_X2APIC_START
9231 && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
9232 {
9233 /* We've already saved the APIC related guest-state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
9234 * virtualization is implemented we'll have to make sure APIC state is saved from the VMCS before
9235 * EMInterpretWrmsr() changes it. */
9236 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
9237 }
9238 else if (pMixedCtx->ecx == MSR_K6_EFER) /* EFER is the only MSR we auto-load but don't allow write-passthrough. */
9239 {
9240 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
9241 AssertRCReturn(rc, rc);
9242 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS;
9243 }
9244 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
9245 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
9246
9247 /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. */
9248 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
9249 {
9250 switch (pMixedCtx->ecx)
9251 {
9252 case MSR_IA32_SYSENTER_CS: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_CS_MSR; break;
9253 case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
9254 case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
9255 case MSR_K8_FS_BASE: /* no break */
9256 case MSR_K8_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS; break;
9257 case MSR_K8_KERNEL_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS; break;
9258 }
9259 }
9260#ifdef VBOX_STRICT
9261 else
9262 {
9263 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
9264 switch (pMixedCtx->ecx)
9265 {
9266 case MSR_IA32_SYSENTER_CS:
9267 case MSR_IA32_SYSENTER_EIP:
9268 case MSR_IA32_SYSENTER_ESP:
9269 case MSR_K8_FS_BASE:
9270 case MSR_K8_GS_BASE:
9271 {
9272 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
9273 return VERR_VMX_UNEXPECTED_EXIT_CODE;
9274 }
9275
9276 case MSR_K8_LSTAR:
9277 case MSR_K6_STAR:
9278 case MSR_K8_SF_MASK:
9279 case MSR_K8_TSC_AUX:
9280 case MSR_K8_KERNEL_GS_BASE:
9281 {
9282 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
9283 pMixedCtx->ecx));
9284 return VERR_VMX_UNEXPECTED_EXIT_CODE;
9285 }
9286 }
9287 }
9288#endif /* VBOX_STRICT */
9289 }
9290 return rc;
9291}
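
/*
 * Illustration only: the WRMSR dirty-flag switch in hmR0VmxExitWrmsr() above, rewritten
 * as a lookup table mapping VMCS-resident MSRs to the context-use flags that force a
 * re-load on the next VM-entry. This table is hypothetical and not part of the
 * original sources.
 */
static const struct { uint32_t idMsr; uint32_t fCtxFlag; } g_aWrmsrDirtyFlags[] =
{
    { MSR_IA32_SYSENTER_CS,  HM_CHANGED_GUEST_SYSENTER_CS_MSR  },
    { MSR_IA32_SYSENTER_EIP, HM_CHANGED_GUEST_SYSENTER_EIP_MSR },
    { MSR_IA32_SYSENTER_ESP, HM_CHANGED_GUEST_SYSENTER_ESP_MSR },
    { MSR_K8_FS_BASE,        HM_CHANGED_GUEST_SEGMENT_REGS     },
    { MSR_K8_GS_BASE,        HM_CHANGED_GUEST_SEGMENT_REGS     },
    { MSR_K8_KERNEL_GS_BASE, HM_CHANGED_VMX_GUEST_AUTO_MSRS    },
};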
9292
9293
9294/**
9295 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
9296 */
9297HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9298{
9299 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9300
9301 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT. */
9302 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
9303 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT)
9304 return VERR_EM_INTERPRETER;
9305 AssertMsgFailed(("Unexpected PAUSE exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
9306 return VERR_VMX_UNEXPECTED_EXIT_CODE;
9307}
9308
9309
9310/**
9311 * VM-exit handler for when the TPR value is lowered below the specified
9312 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
9313 */
9314HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9315{
9316 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9317 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
9318
9319 /*
9320 * The TPR has already been updated, see hmR0VmxPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
9321 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and
9322 * resume guest execution.
9323 */
9324 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
9325 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
9326 return VINF_SUCCESS;
9327}
9328
9329
9330/**
9331 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
9332 * VM-exit.
9333 *
9334 * @retval VINF_SUCCESS when guest execution can continue.
9335 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
9336 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
9337 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
9338 * recompiler.
9339 */
9340HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9341{
9342 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9343 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
9344 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9345 AssertRCReturn(rc, rc);
9346
9347 const RTGCUINTPTR uExitQualification = pVmxTransient->uExitQualification;
9348 const uint32_t uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
9349 PVM pVM = pVCpu->CTX_SUFF(pVM);
9350 switch (uAccessType)
9351 {
9352 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */
9353 {
9354#if 0
9355 /* EMInterpretCRxWrite() references a lot of guest state (EFER, RFLAGS, segment registers, etc.). Sync the entire state. */
9356 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9357#else
9358 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
9359 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
9360 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9361#endif
9362 AssertRCReturn(rc, rc);
9363
9364 rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
9365 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
9366 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
9367 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
9368
9369 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
9370 {
9371 case 0: /* CR0 */
9372 Log4(("CRX CR0 write rc=%d CR0=%#RX64\n", rc, pMixedCtx->cr0));
9373 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
9374 break;
9375 case 2: /* CR2 */
9376 /* Nothing to do here; CR2 is not part of the VMCS. */
9377 break;
9378 case 3: /* CR3 */
9379 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
9380 Log4(("CRX CR3 write rc=%d CR3=%#RX64\n", rc, pMixedCtx->cr3));
9381 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
9382 break;
9383 case 4: /* CR4 */
9384 Log4(("CRX CR4 write rc=%d CR4=%#RX64\n", rc, pMixedCtx->cr4));
9385 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
9386 break;
9387 case 8: /* CR8 */
9388 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
9389 /* CR8 contains the APIC TPR. Was updated by EMInterpretCRxWrite(). */
9390 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
9391 break;
9392 default:
9393 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
9394 break;
9395 }
9396
9397 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
9398 break;
9399 }
9400
9401 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */
9402 {
9403 /* EMInterpretCRxRead() requires EFER MSR, CS. */
9404 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9405 AssertRCReturn(rc, rc);
9406 Assert( !pVM->hm.s.fNestedPaging
9407 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
9408 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
9409
9410 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
9411 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
9412 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
9413
9414 rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
9415 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
9416 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
9417 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
9418 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
9419 Log4(("CRX CR%d Read access rc=%d\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), rc));
9420 break;
9421 }
9422
9423 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
9424 {
9425 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
9426 AssertRCReturn(rc, rc);
9427 rc = EMInterpretCLTS(pVM, pVCpu);
9428 AssertRCReturn(rc, rc);
9429 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
9430 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
9431 Log4(("CRX CLTS write rc=%d\n", rc));
9432 break;
9433 }
9434
9435 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
9436 {
9437 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
9438 AssertRCReturn(rc, rc);
9439 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
9440 if (RT_LIKELY(rc == VINF_SUCCESS))
9441 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
9442 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
9443 Log4(("CRX LMSW write rc=%d\n", rc));
9444 break;
9445 }
9446
9447 default:
9448 {
9449 AssertMsgFailed(("Invalid access-type in Mov CRx exit qualification %#x\n", uAccessType));
9450 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
9451 }
9452 }
9453
9454 /* Validate possible error codes. */
9455 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_CHANGE_MODE || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_SYNC_CR3
9456 || rc == VERR_VMX_UNEXPECTED_EXCEPTION);
9457 if (RT_SUCCESS(rc))
9458 {
9459 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9460 AssertRCReturn(rc2, rc2);
9461 }
9462
9463 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
9464 return rc;
9465}
9466
9467
9468/**
9469 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
9470 * VM-exit.
9471 */
9472HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9473{
9474 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9475 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
9476
9477 int rc2 = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9478 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9479 rc2 |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
9480 rc2 |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */
9481 rc2 |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
9482 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
9483 /* EFER also required for longmode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
9484 AssertRCReturn(rc2, rc2);
9485
9486 /* See Intel spec. 27-5 "Exit Qualifications for I/O Instructions" for the format. */
9487 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
9488 uint8_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
9489 bool fIOWrite = ( VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
9490 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
9491 bool fIOString = VMX_EXIT_QUALIFICATION_IO_IS_STRING(pVmxTransient->uExitQualification);
9492 AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_HMVMX_IPE_1);
9493
9494 /* I/O operation lookup arrays. */
9495 static const uint32_t s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */
9496 static const uint32_t s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
9497
9498 VBOXSTRICTRC rcStrict;
9499 const uint32_t cbValue = s_aIOSizes[uIOWidth];
9500 const uint32_t cbInstr = pVmxTransient->cbInstr;
9501 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
9502 PVM pVM = pVCpu->CTX_SUFF(pVM);
9503 if (fIOString)
9504 {
9505 /*
9506 * INS/OUTS - I/O String instruction.
9507 *
9508 * Use instruction-information if available, otherwise fall back on
9509 * interpreting the instruction.
9510 */
9511 Log4(("CS:RIP=%04x:%#RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
9512#if 0 /* Not quite ready: seen the iSegReg assertion trigger once... Do we perhaps need to always read that in the longjmp / preempt scenario? */
9513 AssertReturn(pMixedCtx->dx == uIOPort, VERR_HMVMX_IPE_2);
9514 if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.msr.u64BasicInfo))
9515 {
9516 rc2 = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
9517 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
9518 rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9519 AssertRCReturn(rc2, rc2);
9520 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_HMVMX_IPE_3);
9521 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
9522 IEMMODE enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
9523 bool fRep = VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification);
9524 if (fIOWrite)
9525 {
9526 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
9527 pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
9528 //if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
9529 // hmR0SavePendingIOPortWriteStr(pVCpu, pMixedCtx->rip, cbValue, enmAddrMode, fRep, cbInstr,
9530 // pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
9531 }
9532 else
9533 {
9534 AssertMsgReturn(pVmxTransient->ExitInstrInfo.StrIo.iSegReg == X86_SREG_ES,
9535 ("%#x (%#llx)\n", pVmxTransient->ExitInstrInfo.StrIo.iSegReg, pVmxTransient->ExitInstrInfo.u),
9536 VERR_HMVMX_IPE_4);
9537 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr);
9538 //if (rcStrict == VINF_IOM_R3_IOPORT_READ)
9539 // hmR0SavePendingIOPortReadStr(pVCpu, pMixedCtx->rip, cbValue, enmAddrMode, fRep, cbInstr);
9540 }
9541 }
9542 else
9543 {
9544 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
9545 rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9546 AssertRCReturn(rc2, rc2);
9547 rcStrict = IEMExecOne(pVCpu);
9548 }
9549 /** @todo IEM needs to be setting these flags somehow. */
9550 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
9551 fUpdateRipAlready = true;
9552#else
9553 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
9554 rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
9555 if (RT_SUCCESS(rcStrict))
9556 {
9557 if (fIOWrite)
9558 {
9559 rcStrict = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
9560 (DISCPUMODE)pDis->uAddrMode, cbValue);
9561 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
9562 }
9563 else
9564 {
9565 rcStrict = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
9566 (DISCPUMODE)pDis->uAddrMode, cbValue);
9567 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
9568 }
9569 }
9570 else
9571 {
9572 AssertMsg(rcStrict == VERR_EM_INTERPRETER, ("rcStrict=%Rrc RIP %#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->rip));
9573 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9574 }
9575#endif
9576 }
9577 else
9578 {
9579 /*
9580 * IN/OUT - I/O instruction.
9581 */
9582 Log4(("CS:RIP=%04x:%#RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
9583 const uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
9584 Assert(!VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification));
9585 if (fIOWrite)
9586 {
9587 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
9588 if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
9589 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
9590 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
9591 }
9592 else
9593 {
9594 uint32_t u32Result = 0;
9595 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
9596 if (IOM_SUCCESS(rcStrict))
9597 {
9598 /* Save result of I/O IN instr. in AL/AX/EAX. */
9599 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
9600 }
9601 else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
9602 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
9603 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
9604 }
9605 }
9606
9607 if (IOM_SUCCESS(rcStrict))
9608 {
9609 if (!fUpdateRipAlready)
9610 {
9611 pMixedCtx->rip += cbInstr;
9612 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
9613 }
9614
9615 /*
9616 * If any I/O breakpoints are armed, we need to check if one triggered
9617 * and take appropriate action.
9618 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
9619 */
9620 rc2 = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
9621 AssertRCReturn(rc2, rc2);
9622
9623 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
9624 * execution engines about whether hyper BPs and such are pending. */
9625 uint32_t const uDr7 = pMixedCtx->dr[7];
9626 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
9627 && X86_DR7_ANY_RW_IO(uDr7)
9628 && (pMixedCtx->cr4 & X86_CR4_DE))
9629 || DBGFBpIsHwIoArmed(pVM)))
9630 {
9631 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
9632
9633 /* We're playing with the host CPU state here, make sure we don't preempt. */
9634 HM_DISABLE_PREEMPT_IF_NEEDED();
9635 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /*fDr6*/);
9636
9637 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
9638 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
9639 {
9640 /* Raise #DB. */
9641 if (fIsGuestDbgActive)
9642 ASMSetDR6(pMixedCtx->dr[6]);
9643 if (pMixedCtx->dr[7] != uDr7)
9644 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
9645
9646 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
9647 }
9648 /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
9649 else if ( rcStrict2 != VINF_SUCCESS
9650 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
9651 rcStrict = rcStrict2;
9652
9653 HM_RESTORE_PREEMPT_IF_NEEDED();
9654 }
9655 }
9656
9657#ifdef DEBUG
9658 if (rcStrict == VINF_IOM_R3_IOPORT_READ)
9659 Assert(!fIOWrite);
9660 else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
9661 Assert(fIOWrite);
9662 else
9663 {
9664 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
9665 * statuses, that the VMM device and some others may return. See
9666 * IOM_SUCCESS() for guidance. */
9667 AssertMsg( RT_FAILURE(rcStrict)
9668 || rcStrict == VINF_SUCCESS
9669 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
9670 || rcStrict == VINF_EM_DBG_BREAKPOINT
9671 || rcStrict == VINF_EM_RAW_GUEST_TRAP
9672 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9673 }
9674#endif
9675
9676 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
9677 return VBOXSTRICTRC_TODO(rcStrict);
9678}
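
/*
 * Sketch (hypothetical helper and struct, not in the original sources): decoding the
 * I/O-instruction exit-qualification fields consumed by hmR0VmxExitIoInstr() above
 * into (port, access size, direction, string-ness).
 */
typedef struct HMVMXIODECODE
{
    uint32_t uPort;     /* I/O port number. */
    uint32_t cbAccess;  /* Access size: 1, 2 or 4 bytes. */
    bool     fWrite;    /* OUT/OUTS if true, IN/INS otherwise. */
    bool     fString;   /* INS/OUTS if true. */
} HMVMXIODECODE;

DECLINLINE(void) hmVmxDecodeIoExitQual(uint64_t uExitQual, HMVMXIODECODE *pDecoded)
{
    /* Width encoding: 0 = 1 byte, 1 = 2 bytes, 3 = 4 bytes; 2 is not used. */
    static const uint32_t s_acbAccess[4] = { 1, 2, 0, 4 };
    pDecoded->uPort    = VMX_EXIT_QUALIFICATION_IO_PORT(uExitQual);
    pDecoded->cbAccess = s_acbAccess[VMX_EXIT_QUALIFICATION_IO_WIDTH(uExitQual) & 3];
    pDecoded->fWrite   =    VMX_EXIT_QUALIFICATION_IO_DIRECTION(uExitQual)
                         == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT;
    pDecoded->fString  = RT_BOOL(VMX_EXIT_QUALIFICATION_IO_IS_STRING(uExitQual));
}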
9679
9680
9681/**
9682 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
9683 * VM-exit.
9684 */
9685HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9686{
9687 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9688
9689 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
9690 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9691 AssertRCReturn(rc, rc);
9692 if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
9693 {
9694 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
9695 AssertRCReturn(rc, rc);
9696 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
9697 {
9698 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
9699
9700 /* Software interrupts and exceptions will be regenerated when the recompiler restarts the instruction. */
9701 if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
9702 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
9703 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
9704 {
9705 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
9706 bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
9707
9708 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */
9709 Assert(!pVCpu->hm.s.Event.fPending);
9710 pVCpu->hm.s.Event.fPending = true;
9711 pVCpu->hm.s.Event.u64IntrInfo = pVmxTransient->uIdtVectoringInfo;
9712 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
9713 AssertRCReturn(rc, rc);
9714 if (fErrorCodeValid)
9715 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
9716 else
9717 pVCpu->hm.s.Event.u32ErrCode = 0;
9718 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
9719 && uVector == X86_XCPT_PF)
9720 {
9721 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
9722 }
9723
9724 Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
9725 }
9726 }
9727 }
9728
9729 /** @todo Emulate task switches someday; currently we just go back to ring-3 for
9730 * emulation. */
9731 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
9732 return VERR_EM_INTERPRETER;
9733}
9734
9735
9736/**
9737 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9738 */
9739HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9740{
9741 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9742 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
9743 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
9744 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
9745 AssertRCReturn(rc, rc);
9746 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
9747 return VINF_EM_DBG_STEPPED;
9748}
9749
9750
9751/**
9752 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9753 */
9754HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9755{
9756 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9757
9758 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
9759 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
9760 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
9761 return VINF_SUCCESS;
9762 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
9763 return rc;
9764
9765#if 0
9766 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
9767 * just sync the whole thing. */
9768 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9769#else
9770 /* Aggressive state sync. for now. */
9771 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
9772 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
9773 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9774#endif
9775 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9776 AssertRCReturn(rc, rc);
9777
9778 /* See Intel spec. 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
9779 uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
9780 switch (uAccessType)
9781 {
9782 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9783 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9784 {
9785 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
9786 && VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) == 0x80)
9787 {
9788 AssertMsgFailed(("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9789 }
9790
9791 RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
9792 GCPhys &= PAGE_BASE_GC_MASK;
9793 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
9794 PVM pVM = pVCpu->CTX_SUFF(pVM);
9795 Log4(("ApicAccess uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9796 VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
9797
9798 VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
9799 (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
9800 CPUMCTX2CORE(pMixedCtx), GCPhys);
9801 rc = VBOXSTRICTRC_VAL(rc2);
9802 Log4(("ApicAccess rc=%d\n", rc));
9803 if ( rc == VINF_SUCCESS
9804 || rc == VERR_PAGE_TABLE_NOT_PRESENT
9805 || rc == VERR_PAGE_NOT_PRESENT)
9806 {
9807 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9808 | HM_CHANGED_VMX_GUEST_APIC_STATE;
9809 rc = VINF_SUCCESS;
9810 }
9811 break;
9812 }
9813
9814 default:
9815 Log4(("ApicAccess uAccessType=%#x\n", uAccessType));
9816 rc = VINF_EM_RAW_EMULATE_INSTR;
9817 break;
9818 }
9819
9820 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
9821 return rc;
9822}
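
/*
 * Illustration (hypothetical helper): how the faulting guest-physical address is
 * derived in hmR0VmxExitApicAccess() above from the APIC-base MSR and the
 * exit-qualification offset.
 */
DECLINLINE(RTGCPHYS) hmVmxApicAccessGCPhys(uint64_t u64MsrApicBase, uint64_t uExitQual)
{
    RTGCPHYS GCPhys = u64MsrApicBase & PAGE_BASE_GC_MASK;   /* Mask off the MSR flag bits to get the page base. */
    return GCPhys + VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(uExitQual);
}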
9823
9824
9825/**
9826 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9827 * VM-exit.
9828 */
9829HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9830{
9831 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9832
9833 /* We should -not- get this VM-exit if the guest is debugging. */
9834 if (CPUMIsGuestDebugStateActive(pVCpu))
9835 {
9836 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
9837 return VERR_VMX_UNEXPECTED_EXIT_CODE;
9838 }
9839
9840 int rc = VERR_INTERNAL_ERROR_5;
9841 if ( !DBGFIsStepping(pVCpu)
9842 && !pVCpu->hm.s.fSingleInstruction
9843 && !CPUMIsHyperDebugStateActive(pVCpu))
9844 {
9845 /* Don't intercept MOV DRx and #DB any more. */
9846 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
9847 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
9848 AssertRCReturn(rc, rc);
9849
9850 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
9851 {
9852#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
9853 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
9854 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
9855 AssertRCReturn(rc, rc);
9856#endif
9857 }
9858
9859 /* We're playing with the host CPU state here, make sure we can't preempt. */
9860 HM_DISABLE_PREEMPT_IF_NEEDED();
9861
9862 /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
9863 PVM pVM = pVCpu->CTX_SUFF(pVM);
9864 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
9865 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
9866
9867 HM_RESTORE_PREEMPT_IF_NEEDED();
9868
9869#ifdef VBOX_WITH_STATISTICS
9870 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9871 AssertRCReturn(rc, rc);
9872 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
9873 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
9874 else
9875 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
9876#endif
9877 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
9878 return VINF_SUCCESS;
9879 }
9880
9881 /*
9882 * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode() which requires EFER, CS. EFER is always up-to-date, see
9883 * hmR0VmxSaveGuestAutoLoadStoreMsrs(). Update only the segment registers from the CPU.
9884 */
9885 rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9886 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9887 AssertRCReturn(rc, rc);
9888 Log4(("CS:RIP=%04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
9889
9890 PVM pVM = pVCpu->CTX_SUFF(pVM);
9891 if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
9892 {
9893 rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
9894 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
9895 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
9896 if (RT_SUCCESS(rc))
9897 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
9898 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
9899 }
9900 else
9901 {
9902 rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
9903 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
9904 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
9905 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
9906 }
9907
9908 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
9909 if (RT_SUCCESS(rc))
9910 {
9911 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9912 AssertRCReturn(rc2, rc2);
9913 }
9914 return rc;
9915}
9916
9917
9918/**
9919 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
9920 * Conditional VM-exit.
9921 */
9922HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9923{
9924 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9925 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
9926
9927 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
9928 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
9929 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
9930 return VINF_SUCCESS;
9931 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
9932 return rc;
9933
9934 RTGCPHYS GCPhys = 0;
9935 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
9936
9937#if 0
9938 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
9939#else
9940 /* Aggressive state sync. for now. */
9941 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
9942 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
9943 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9944#endif
9945 AssertRCReturn(rc, rc);
9946
9947    /*
9948     * If we succeed, resume guest execution.
9949     * If we fail to interpret the instruction because we couldn't get the guest physical address
9950     * of the page containing the instruction via the guest's page tables (we'd invalidate the guest
9951     * page in the host TLB), resume execution anyway; the resulting guest page fault lets the guest
9952     * handle this weird case itself. See @bugref{6043}.
9953     */
9954 PVM pVM = pVCpu->CTX_SUFF(pVM);
9955 VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
9956 rc = VBOXSTRICTRC_VAL(rc2);
9957    Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
9958 if ( rc == VINF_SUCCESS
9959 || rc == VERR_PAGE_TABLE_NOT_PRESENT
9960 || rc == VERR_PAGE_NOT_PRESENT)
9961 {
9962 /* Successfully handled MMIO operation. */
9963 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9964 | HM_CHANGED_VMX_GUEST_APIC_STATE;
9965 rc = VINF_SUCCESS;
9966 }
9967 return rc;
9968}
9969
9970
9971/**
9972 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
9973 * VM-exit.
9974 */
9975HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9976{
9977 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9978 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);
9979
9980 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
9981 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
9982 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
9983 return VINF_SUCCESS;
9984 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
9985 return rc;
9986
9987 RTGCPHYS GCPhys = 0;
9988 rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
9989 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9990#if 0
9991 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx); /** @todo Can we do better? */
9992#else
9993 /* Aggressive state sync. for now. */
9994 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
9995 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
9996 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9997#endif
9998 AssertRCReturn(rc, rc);
9999
10000 /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
10001 AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));
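    /* The value 2 in bits 8:7 (guest linear-address field invalid, yet bit 8 set) is a reserved
       combination per the Intel spec, hence the assertion above. */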
10002
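    /* Synthesize a #PF-style error code from the EPT exit qualification: instruction fetches map to
       X86_TRAP_PF_ID, writes to X86_TRAP_PF_RW and a present (permission-violating) EPT entry to
       X86_TRAP_PF_P, so PGM can treat this like an ordinary nested page fault. */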
10003 RTGCUINT uErrorCode = 0;
10004 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
10005 uErrorCode |= X86_TRAP_PF_ID;
10006 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
10007 uErrorCode |= X86_TRAP_PF_RW;
10008 if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
10009 uErrorCode |= X86_TRAP_PF_P;
10010
10011 TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);
10012
10013 Log4(("EPT violation %#x at %#RX64 ErrorCode %#x CS:EIP=%04x:%#RX64\n", pVmxTransient->uExitQualification, GCPhys,
10014 uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));
10015
10016 /* Handle the pagefault trap for the nested shadow table. */
10017 PVM pVM = pVCpu->CTX_SUFF(pVM);
10018 rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
10019 TRPMResetTrap(pVCpu);
10020
10021 /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
10022 if ( rc == VINF_SUCCESS
10023 || rc == VERR_PAGE_TABLE_NOT_PRESENT
10024 || rc == VERR_PAGE_NOT_PRESENT)
10025 {
10026 /* Successfully synced our nested page tables. */
10027 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
10028 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
10029 return VINF_SUCCESS;
10030 }
10031
10032    Log4(("EPT return to ring-3 rc=%d\n", rc));
10033 return rc;
10034}
10035
10036/** @} */
10037
10038/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10039/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
10040/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
10041
10042/** @name VM-exit exception handlers.
10043 * @{
10044 */
10045
10046/**
10047 * VM-exit exception handler for #MF (Math Fault: floating point exception).
10048 */
10049static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10050{
10051 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
10052 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);
10053
10054 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10055 AssertRCReturn(rc, rc);
10056
10057 if (!(pMixedCtx->cr0 & X86_CR0_NE))
10058 {
10059 /* Old-style FPU error reporting needs some extra work. */
10060 /** @todo don't fall back to the recompiler, but do it manually. */
10061 return VERR_EM_INTERPRETER;
10062 }
10063
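    /* CR0.NE is set, so the guest uses native #MF reporting; simply re-inject the exception. */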
10064 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
10065 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
10066 return rc;
10067}
10068
10069
10070/**
10071 * VM-exit exception handler for #BP (Breakpoint exception).
10072 */
10073static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10074{
10075 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
10076 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);
10077
10078    /** @todo Try to optimize this by not saving the entire guest state unless
10079     *        really needed. */
10080 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10081 AssertRCReturn(rc, rc);
10082
10083 PVM pVM = pVCpu->CTX_SUFF(pVM);
10084 rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
10085 if (rc == VINF_EM_RAW_GUEST_TRAP)
10086 {
10087 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
10088 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10089 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
10090 AssertRCReturn(rc, rc);
10091
10092 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
10093 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
10094 }
10095
10096 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
10097 return rc;
10098}
10099
10100
10101/**
10102 * VM-exit exception handler for #DB (Debug exception).
10103 */
10104static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10105{
10106 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
10107 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
10108 Log6(("XcptDB\n"));
10109
10110    /*
10111     * Get the DR6-like values from the exit qualification and pass them to DBGF
10112     * for processing.
10113     */
10114 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10115 AssertRCReturn(rc, rc);
10116
10117 /* Refer Intel spec. Table 27-1. "Exit Qualifications for debug exceptions" for the format. */
10118 uint64_t uDR6 = X86_DR6_INIT_VAL;
10119 uDR6 |= ( pVmxTransient->uExitQualification
10120 & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));
10121
10122 rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
10123 if (rc == VINF_EM_RAW_GUEST_TRAP)
10124 {
10125 /*
10126 * The exception was for the guest. Update DR6, DR7.GD and
10127 * IA32_DEBUGCTL.LBR before forwarding it.
10128 * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
10129 */
10130 HM_DISABLE_PREEMPT_IF_NEEDED();
10131
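        /* Merge the reported DR6 bits into the guest's DR6 and, when the guest's debug registers
           are live on the CPU, into the hardware DR6 as well. */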
10132 pMixedCtx->dr[6] &= ~X86_DR6_B_MASK;
10133 pMixedCtx->dr[6] |= uDR6;
10134 if (CPUMIsGuestDebugStateActive(pVCpu))
10135 ASMSetDR6(pMixedCtx->dr[6]);
10136
10137 HM_RESTORE_PREEMPT_IF_NEEDED();
10138
10139 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
10140 AssertRCReturn(rc, rc);
10141
10142        /* X86_DR7_GD is cleared on #DB delivery so that DRx accesses inside the guest's handler aren't trapped. */
10143 pMixedCtx->dr[7] &= ~X86_DR7_GD;
10144
10145 /* Paranoia. */
10146 pMixedCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
10147 pMixedCtx->dr[7] |= X86_DR7_RA1_MASK;
10148
10149 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
10150 AssertRCReturn(rc, rc);
10151
10152 /*
10153 * Raise #DB in the guest.
10154 */
10155 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
10156 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10157 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
10158 AssertRCReturn(rc, rc);
10159 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
10160 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
10161 return VINF_SUCCESS;
10162 }
10163
10164    /*
10165     * Not a guest trap; it must be a hypervisor-related debug event then.
10166     * Update DR6 in case someone is interested in it.
10167     */
10168 AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
10169 AssertReturn(CPUMIsHyperDebugStateActive(pVCpu), VERR_HM_IPE_5);
10170 CPUMSetHyperDR6(pVCpu, uDR6);
10171
10172 return rc;
10173}
10174
10175
10176/**
10177 * VM-exit exception handler for #NM (Device-not-available exception), raised
10178 * when the guest accesses the FPU while its state is not loaded.
10179 */
10180static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10181{
10182 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
10183
10184#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
10185 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
10186#endif
10187
10188 /* We require CR0 and EFER. EFER is always up-to-date. */
10189 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
10190 AssertRCReturn(rc, rc);
10191
10192 /* We're playing with the host CPU state here, have to disable preemption. */
10193 HM_DISABLE_PREEMPT_IF_NEEDED();
10194
10195 /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
10196 PVM pVM = pVCpu->CTX_SUFF(pVM);
10197 rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
10198 if (rc == VINF_SUCCESS)
10199 {
10200 Assert(CPUMIsGuestFPUStateActive(pVCpu));
10201 HM_RESTORE_PREEMPT_IF_NEEDED();
10202
10203 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
10204 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
10205 return VINF_SUCCESS;
10206 }
10207
10208 HM_RESTORE_PREEMPT_IF_NEEDED();
10209
10210 /* Forward #NM to the guest. */
10211 Assert(rc == VINF_EM_RAW_GUEST_TRAP);
10212 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
10213 AssertRCReturn(rc, rc);
10214 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
10215 pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
10216 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
10217 return rc;
10218}
10219
10220
10221/**
10222 * VM-exit exception handler for #GP (General-protection exception).
10223 *
10224 * @remarks Requires pVmxTransient->uExitIntrInfo to be up-to-date.
10225 */
10226static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10227{
10228 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
10229 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);
10230
10231 int rc = VERR_INTERNAL_ERROR_5;
10232 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
10233 {
10234#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
10235 /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
10236 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
10237 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
10238 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10239 rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10240 AssertRCReturn(rc, rc);
10241 Log4(("#GP Gst: RIP %#RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u\n", pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode,
10242 pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu)));
10243 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
10244 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
10245 return rc;
10246#else
10247 /* We don't intercept #GP. */
10248 AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
10249 return VERR_VMX_UNEXPECTED_EXCEPTION;
10250#endif
10251 }
10252
10253 Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
10254 Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);
10255
10256 /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
10257 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10258 AssertRCReturn(rc, rc);
10259
10260 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
10261 uint32_t cbOp = 0;
10262 PVM pVM = pVCpu->CTX_SUFF(pVM);
10263 rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
10264 if (RT_SUCCESS(rc))
10265 {
10266 rc = VINF_SUCCESS;
10267 Assert(cbOp == pDis->cbInstr);
10268 Log4(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
10269 switch (pDis->pCurInstr->uOpcode)
10270 {
10271 case OP_CLI:
10272 {
10273 pMixedCtx->eflags.Bits.u1IF = 0;
10274 pMixedCtx->rip += pDis->cbInstr;
10275 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
10276 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
10277 break;
10278 }
10279
10280 case OP_STI:
10281 {
10282 pMixedCtx->eflags.Bits.u1IF = 1;
10283 pMixedCtx->rip += pDis->cbInstr;
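                /* STI has a one-instruction interrupt shadow: interrupts stay inhibited until RIP
                   moves past the next instruction, which the force-flag below keeps track of. */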
10284 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
10285 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
10286 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
10287 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
10288 break;
10289 }
10290
10291 case OP_HLT:
10292 {
10293 rc = VINF_EM_HALT;
10294 pMixedCtx->rip += pDis->cbInstr;
10295 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
10296 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
10297 break;
10298 }
10299
10300 case OP_POPF:
10301 {
10302 Log4(("POPF CS:RIP %04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
10303 uint32_t cbParm = 0;
10304 uint32_t uMask = 0;
10305 if (pDis->fPrefix & DISPREFIX_OPSIZE)
10306 {
10307 cbParm = 4;
10308 uMask = 0xffffffff;
10309 }
10310 else
10311 {
10312 cbParm = 2;
10313 uMask = 0xffff;
10314 }
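                /* uMask confines the stack-pointer arithmetic below to the operand size, mimicking
                   16-bit SP wrap-around when no operand-size prefix is present. */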
10315
10316 /* Get the stack pointer & pop the contents of the stack onto Eflags. */
10317 RTGCPTR GCPtrStack = 0;
10318 X86EFLAGS Eflags;
10319 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
10320 &GCPtrStack);
10321 if (RT_SUCCESS(rc))
10322 {
10323 Assert(sizeof(Eflags.u32) >= cbParm);
10324 Eflags.u32 = 0;
10325 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u32, cbParm);
10326 }
10327 if (RT_FAILURE(rc))
10328 {
10329 rc = VERR_EM_INTERPRETER;
10330 break;
10331 }
10332 Log4(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
10333 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
10334 | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
10335 /* The RF bit is always cleared by POPF; see Intel Instruction reference for POPF. */
10336 pMixedCtx->eflags.Bits.u1RF = 0;
10337 pMixedCtx->esp += cbParm;
10338 pMixedCtx->esp &= uMask;
10339 pMixedCtx->rip += pDis->cbInstr;
10340 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
10341 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
10342 break;
10343 }
10344
10345 case OP_PUSHF:
10346 {
10347 uint32_t cbParm = 0;
10348 uint32_t uMask = 0;
10349 if (pDis->fPrefix & DISPREFIX_OPSIZE)
10350 {
10351 cbParm = 4;
10352 uMask = 0xffffffff;
10353 }
10354 else
10355 {
10356 cbParm = 2;
10357 uMask = 0xffff;
10358 }
10359
10360 /* Get the stack pointer & push the contents of eflags onto the stack. */
10361 RTGCPTR GCPtrStack = 0;
10362 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
10363 SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
10364 if (RT_FAILURE(rc))
10365 {
10366 rc = VERR_EM_INTERPRETER;
10367 break;
10368 }
10369 X86EFLAGS Eflags = pMixedCtx->eflags;
10370            /* The RF and VM bits are cleared in the EFLAGS image stored on the stack; see Intel Instruction reference for PUSHF. */
10371 Eflags.Bits.u1RF = 0;
10372 Eflags.Bits.u1VM = 0;
10373
10374 rc = PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u, cbParm);
10375 if (RT_FAILURE(rc))
10376 {
10377 rc = VERR_EM_INTERPRETER;
10378 break;
10379 }
10380 Log4(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));
10381 pMixedCtx->esp -= cbParm;
10382 pMixedCtx->esp &= uMask;
10383 pMixedCtx->rip += pDis->cbInstr;
10384 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP;
10385 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
10386 break;
10387 }
10388
10389 case OP_IRET:
10390 {
10391 /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
10392 * instruction reference. */
10393 RTGCPTR GCPtrStack = 0;
10394 uint32_t uMask = 0xffff;
10395 uint16_t aIretFrame[3];
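                /* A real-mode IRET pops IP, CS and FLAGS: three 16-bit words in that stack order. */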
10396 if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
10397 {
10398 rc = VERR_EM_INTERPRETER;
10399 break;
10400 }
10401 rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
10402 &GCPtrStack);
10403 if (RT_SUCCESS(rc))
10404 rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame));
10405 if (RT_FAILURE(rc))
10406 {
10407 rc = VERR_EM_INTERPRETER;
10408 break;
10409 }
10410 pMixedCtx->eip = 0;
10411 pMixedCtx->ip = aIretFrame[0];
10412 pMixedCtx->cs.Sel = aIretFrame[1];
10413 pMixedCtx->cs.ValidSel = aIretFrame[1];
10414 pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
10415 pMixedCtx->eflags.u32 = (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
10416 | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
10417 pMixedCtx->sp += sizeof(aIretFrame);
10418 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_SEGMENT_REGS | HM_CHANGED_GUEST_RSP
10419 | HM_CHANGED_GUEST_RFLAGS;
10420                Log4(("IRET %#RGv to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
10421 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
10422 break;
10423 }
10424
10425 case OP_INT:
10426 {
10427 uint16_t uVector = pDis->Param1.uValue & 0xff;
10428 hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
10429 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
10430 break;
10431 }
10432
10433 case OP_INTO:
10434 {
10435 if (pMixedCtx->eflags.Bits.u1OF)
10436 {
10437 hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
10438 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
10439 }
10440 break;
10441 }
10442
10443 default:
10444 {
10445 VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
10446 EMCODETYPE_SUPERVISOR);
10447 rc = VBOXSTRICTRC_VAL(rc2);
10448 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
10449 Log4(("#GP rc=%Rrc\n", rc));
10450 break;
10451 }
10452 }
10453 }
10454 else
10455 rc = VERR_EM_INTERPRETER;
10456
10457 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
10458 ("#GP Unexpected rc=%Rrc\n", rc));
10459 return rc;
10460}
10461
10462
10463/**
10464 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
10465 * the exception reported in the VMX transient structure back into the VM.
10466 *
10467 * @remarks Requires uExitIntrInfo in the VMX transient structure to be
10468 * up-to-date.
10469 */
10470static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10471{
10472 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
10473
10474    /* Re-inject the exception into the guest. This cannot be a double-fault condition, as that would already have been
10475       handled in hmR0VmxCheckExitDueToEventDelivery(). */
10476 int rc = hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
10477 rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
10478 AssertRCReturn(rc, rc);
10479 Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);
10480
10481 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
10482 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
10483 return VINF_SUCCESS;
10484}
10485
10486
10487/**
10488 * VM-exit exception handler for #PF (Page-fault exception).
10489 */
10490static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
10491{
10492 HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
10493 PVM pVM = pVCpu->CTX_SUFF(pVM);
10494 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
10495 rc |= hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
10496 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
10497 AssertRCReturn(rc, rc);
10498
10499#if defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) || defined(HMVMX_ALWAYS_TRAP_PF)
10500 if (pVM->hm.s.fNestedPaging)
10501 {
10502 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory or vectoring #PF. */
10503 if (RT_LIKELY(!pVmxTransient->fVectoringPF))
10504 {
10505 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
10506 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
10507 0 /* cbInstr */, pVmxTransient->uExitIntrErrorCode, pVmxTransient->uExitQualification);
10508 }
10509 else
10510 {
10511 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
10512 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
10513 Log4(("Pending #DF due to vectoring #PF. NP\n"));
10514 }
10515 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
10516 return rc;
10517 }
10518#else
10519 Assert(!pVM->hm.s.fNestedPaging);
10520#endif
10521
10522 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
10523 AssertRCReturn(rc, rc);
10524
10525 Log4(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
10526 pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode, pMixedCtx->cr3));
10527
10528 TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntrErrorCode);
10529 rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntrErrorCode, CPUMCTX2CORE(pMixedCtx),
10530 (RTGCPTR)pVmxTransient->uExitQualification);
10531
10532 Log4(("#PF: rc=%Rrc\n", rc));
10533 if (rc == VINF_SUCCESS)
10534 {
10535        /* Successfully synced shadow page tables or emulated an MMIO instruction. */
10536        /** @todo This isn't quite right; what if the guest does lgdt with some MMIO
10537         *        memory? We don't update the whole state here... */
10538 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
10539 | HM_CHANGED_VMX_GUEST_APIC_STATE;
10540 TRPMResetTrap(pVCpu);
10541 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
10542 return rc;
10543 }
10544 else if (rc == VINF_EM_RAW_GUEST_TRAP)
10545 {
10546 if (!pVmxTransient->fVectoringPF)
10547 {
10548 /* It's a guest page fault and needs to be reflected to the guest. */
10549 uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
10550 TRPMResetTrap(pVCpu);
10551 pVCpu->hm.s.Event.fPending = false; /* In case it's a contributory #PF. */
10552 pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
10553 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
10554 0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
10555 }
10556 else
10557 {
10558 /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
10559 TRPMResetTrap(pVCpu);
10560 pVCpu->hm.s.Event.fPending = false; /* Clear pending #PF to replace it with #DF. */
10561 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
10562 Log4(("#PF: Pending #DF due to vectoring #PF\n"));
10563 }
10564
10565 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
10566 return VINF_SUCCESS;
10567 }
10568
10569 TRPMResetTrap(pVCpu);
10570 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
10571 return rc;
10572}
10573
10574/** @} */
10575