VirtualBox

source: vbox/trunk/src/VBox/VMM/VMMR0/HMVMXR0.cpp@48201

Last change on this file since 48201 was 48194, checked in by vboxsync, 11 years ago

VMM/HMVMXR0: Even without preemption hooks, we can now load the guest-state before disabling interrupts.
Better handling of triple-fault conditions (don't leave with interrupts disabled).

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 443.5 KB
1/* $Id: HMVMXR0.cpp 48194 2013-08-30 14:33:38Z vboxsync $ */
2/** @file
3 * HM VMX (Intel VT-x) - Host Context Ring-0.
4 */
5
6/*
7 * Copyright (C) 2012-2013 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*******************************************************************************
19* Header Files *
20*******************************************************************************/
21#define LOG_GROUP LOG_GROUP_HM
22#include <iprt/asm-amd64-x86.h>
23#include <iprt/thread.h>
24#include <iprt/string.h>
25
26#include "HMInternal.h"
27#include <VBox/vmm/vm.h>
28#include "HMVMXR0.h"
29#include <VBox/vmm/pdmapi.h>
30#include <VBox/vmm/dbgf.h>
31#include <VBox/vmm/iem.h>
32#include <VBox/vmm/iom.h>
33#include <VBox/vmm/selm.h>
34#include <VBox/vmm/tm.h>
35#ifdef VBOX_WITH_REM
36# include <VBox/vmm/rem.h>
37#endif
38#ifdef DEBUG_ramshankar
39#define HMVMX_SAVE_FULL_GUEST_STATE
40#define HMVMX_SYNC_FULL_GUEST_STATE
41#define HMVMX_ALWAYS_CHECK_GUEST_STATE
42#define HMVMX_ALWAYS_TRAP_ALL_XCPTS
43#define HMVMX_ALWAYS_TRAP_PF
44#endif
45
46
47/*******************************************************************************
48* Defined Constants And Macros *
49*******************************************************************************/
50#if defined(RT_ARCH_AMD64)
51# define HMVMX_IS_64BIT_HOST_MODE() (true)
52typedef RTHCUINTREG HMVMXHCUINTREG;
53#elif defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
54extern "C" uint32_t g_fVMXIs64bitHost;
55# define HMVMX_IS_64BIT_HOST_MODE() (g_fVMXIs64bitHost != 0)
56typedef uint64_t HMVMXHCUINTREG;
57#else
58# define HMVMX_IS_64BIT_HOST_MODE() (false)
59typedef RTHCUINTREG HMVMXHCUINTREG;
60#endif
61
62/** Use the function table. */
63#define HMVMX_USE_FUNCTION_TABLE
64
65/** Determine which tagged-TLB flush handler to use. */
66#define HMVMX_FLUSH_TAGGED_TLB_EPT_VPID 0
67#define HMVMX_FLUSH_TAGGED_TLB_EPT 1
68#define HMVMX_FLUSH_TAGGED_TLB_VPID 2
69#define HMVMX_FLUSH_TAGGED_TLB_NONE 3
70
71/** @name Updated-guest-state flags.
72 * @{ */
73#define HMVMX_UPDATED_GUEST_RIP RT_BIT(0)
74#define HMVMX_UPDATED_GUEST_RSP RT_BIT(1)
75#define HMVMX_UPDATED_GUEST_RFLAGS RT_BIT(2)
76#define HMVMX_UPDATED_GUEST_CR0 RT_BIT(3)
77#define HMVMX_UPDATED_GUEST_CR3 RT_BIT(4)
78#define HMVMX_UPDATED_GUEST_CR4 RT_BIT(5)
79#define HMVMX_UPDATED_GUEST_GDTR RT_BIT(6)
80#define HMVMX_UPDATED_GUEST_IDTR RT_BIT(7)
81#define HMVMX_UPDATED_GUEST_LDTR RT_BIT(8)
82#define HMVMX_UPDATED_GUEST_TR RT_BIT(9)
83#define HMVMX_UPDATED_GUEST_SEGMENT_REGS RT_BIT(10)
84#define HMVMX_UPDATED_GUEST_DEBUG RT_BIT(11)
85#define HMVMX_UPDATED_GUEST_FS_BASE_MSR RT_BIT(12)
86#define HMVMX_UPDATED_GUEST_GS_BASE_MSR RT_BIT(13)
87#define HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR RT_BIT(14)
88#define HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR RT_BIT(15)
89#define HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR RT_BIT(16)
90#define HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS RT_BIT(17)
91#define HMVMX_UPDATED_GUEST_ACTIVITY_STATE RT_BIT(18)
92#define HMVMX_UPDATED_GUEST_APIC_STATE RT_BIT(19)
93#define HMVMX_UPDATED_GUEST_ALL ( HMVMX_UPDATED_GUEST_RIP \
94 | HMVMX_UPDATED_GUEST_RSP \
95 | HMVMX_UPDATED_GUEST_RFLAGS \
96 | HMVMX_UPDATED_GUEST_CR0 \
97 | HMVMX_UPDATED_GUEST_CR3 \
98 | HMVMX_UPDATED_GUEST_CR4 \
99 | HMVMX_UPDATED_GUEST_GDTR \
100 | HMVMX_UPDATED_GUEST_IDTR \
101 | HMVMX_UPDATED_GUEST_LDTR \
102 | HMVMX_UPDATED_GUEST_TR \
103 | HMVMX_UPDATED_GUEST_SEGMENT_REGS \
104 | HMVMX_UPDATED_GUEST_DEBUG \
105 | HMVMX_UPDATED_GUEST_FS_BASE_MSR \
106 | HMVMX_UPDATED_GUEST_GS_BASE_MSR \
107 | HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR \
108 | HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR \
109 | HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR \
110 | HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS \
111 | HMVMX_UPDATED_GUEST_ACTIVITY_STATE \
112 | HMVMX_UPDATED_GUEST_APIC_STATE)
113/** @} */
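/*
 * Illustrative sketch (not lifted from this file; the fUpdatedGuestState field
 * name is assumed from context): a save routine reads a guest field from the
 * VMCS once and then marks it up-to-date so later queries can skip the VMREAD:
 *
 *     uint64_t u64Val;
 *     VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
 *     pMixedCtx->rip = u64Val;
 *     pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RIP;
 */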
114
 115/** @name VMX transient read flags.
 116 * Flags to skip redundant reads of some common VMCS fields that are not part
 117 * of the guest-CPU state but are in the transient structure.
 118 * @{ */
119#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO RT_BIT(0)
120#define HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE RT_BIT(1)
121#define HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION RT_BIT(2)
122#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN RT_BIT(3)
123#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO RT_BIT(4)
 124#define HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE RT_BIT(5)
#define HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO RT_BIT(6)
125/** @} */
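/*
 * Illustrative sketch (assumption, mirroring the hmR0VmxRead*Vmcs helpers
 * further down): each helper tests its bit before touching the VMCS so that
 * repeated calls within a single VM-exit cost only one VMREAD, e.g.:
 *
 *     if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
 *     {
 *         rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
 *         pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
 *     }
 */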
126
 127/** @name VMCS states.
 128 *
 129 * This does not reflect all possible VMCS states but currently only those
 130 * needed for maintaining the VMCS consistently even when thread-context hooks
 131 * are used. This may later be extended (e.g. for nested virtualization).
 132 *
 133 * @{ */
134#define HMVMX_VMCS_STATE_CLEAR RT_BIT(0)
135#define HMVMX_VMCS_STATE_ACTIVE RT_BIT(1)
136#define HMVMX_VMCS_STATE_LAUNCHED RT_BIT(2)
137/** @} */
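/*
 * Illustrative note (based on the VMX instruction semantics, not taken
 * verbatim from this file): VMCLEAR moves a VMCS to the CLEAR state, VMPTRLD
 * makes it ACTIVE on the current CPU, and a successful VMLAUNCH marks it
 * LAUNCHED (after which VMRESUME must be used instead of VMLAUNCH), roughly:
 *
 *     VMXClearVMCS(pVCpu->hm.s.vmx.HCPhysVmcs);    // -> HMVMX_VMCS_STATE_CLEAR
 *     VMXActivateVMCS(pVCpu->hm.s.vmx.HCPhysVmcs); // -> HMVMX_VMCS_STATE_ACTIVE
 *
 * The VMXClearVMCS/VMXActivateVMCS names are assumptions here; see the VMCS
 * setup code elsewhere in this file for the actual calls.
 */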
138
139/**
140 * Exception bitmap mask for real-mode guests (real-on-v86).
141 *
142 * We need to intercept all exceptions manually (except #PF). #NM is also
143 * handled separately, see hmR0VmxLoadGuestControlRegs(). #PF need not be
144 * intercepted even in real-mode if we have Nested Paging support.
145 */
146#define HMVMX_REAL_MODE_XCPT_MASK ( RT_BIT(X86_XCPT_DE) | RT_BIT(X86_XCPT_DB) | RT_BIT(X86_XCPT_NMI) \
147 | RT_BIT(X86_XCPT_BP) | RT_BIT(X86_XCPT_OF) | RT_BIT(X86_XCPT_BR) \
148 | RT_BIT(X86_XCPT_UD) /* RT_BIT(X86_XCPT_NM) */ | RT_BIT(X86_XCPT_DF) \
149 | RT_BIT(X86_XCPT_CO_SEG_OVERRUN) | RT_BIT(X86_XCPT_TS) | RT_BIT(X86_XCPT_NP) \
150 | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_GP) /* RT_BIT(X86_XCPT_PF) */ \
151 | RT_BIT(X86_XCPT_MF) | RT_BIT(X86_XCPT_AC) | RT_BIT(X86_XCPT_MC) \
152 | RT_BIT(X86_XCPT_XF))
153
154/**
155 * Exception bitmap mask for all contributory exceptions.
156 *
157 * Page fault is deliberately excluded here as it's conditional as to whether
158 * it's contributory or benign. Page faults are handled separately.
159 */
160#define HMVMX_CONTRIBUTORY_XCPT_MASK ( RT_BIT(X86_XCPT_GP) | RT_BIT(X86_XCPT_NP) | RT_BIT(X86_XCPT_SS) | RT_BIT(X86_XCPT_TS) \
161 | RT_BIT(X86_XCPT_DE))
162
163/** Maximum VM-instruction error number. */
164#define HMVMX_INSTR_ERROR_MAX 28
165
166/** Profiling macro. */
167#ifdef HM_PROFILE_EXIT_DISPATCH
168# define HMVMX_START_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitDispatch, ed)
169# define HMVMX_STOP_EXIT_DISPATCH_PROF() STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitDispatch, ed)
170#else
171# define HMVMX_START_EXIT_DISPATCH_PROF() do { } while (0)
172# define HMVMX_STOP_EXIT_DISPATCH_PROF() do { } while (0)
173#endif
174
175/** Assert that preemption is disabled or covered by thread-context hooks. */
176#define HMVMX_ASSERT_PREEMPT_SAFE() Assert( VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
177 || !RTThreadPreemptIsEnabled(NIL_RTTHREAD));
178
179/** Assert that we haven't migrated CPUs when thread-context hooks are not
180 * used. */
181#define HMVMX_ASSERT_CPU_SAFE() AssertMsg( VMMR0ThreadCtxHooksAreRegistered(pVCpu) \
182 || pVCpu->hm.s.idEnteredCpu == RTMpCpuId(), \
183 ("Illegal migration! Entered on CPU %u Current %u\n", \
184 pVCpu->hm.s.idEnteredCpu, RTMpCpuId())); \
185
186/*******************************************************************************
187* Structures and Typedefs *
188*******************************************************************************/
189/**
190 * VMX transient state.
191 *
192 * A state structure for holding miscellaneous information across
 193 * VMX non-root operation, to be restored after the transition.
194 */
195typedef struct VMXTRANSIENT
196{
197 /** The host's rflags/eflags. */
198 RTCCUINTREG uEflags;
199#if HC_ARCH_BITS == 32
200 uint32_t u32Alignment0;
201#endif
202 /** The guest's LSTAR MSR value used for TPR patching for 32-bit guests. */
203 uint64_t u64LStarMsr;
204 /** The guest's TPR value used for TPR shadowing. */
205 uint8_t u8GuestTpr;
206 /** Alignment. */
207 uint8_t abAlignment0[7];
208
209 /** The basic VM-exit reason. */
210 uint16_t uExitReason;
211 /** Alignment. */
212 uint16_t u16Alignment0;
213 /** The VM-exit interruption error code. */
214 uint32_t uExitIntrErrorCode;
215 /** The VM-exit exit qualification. */
216 uint64_t uExitQualification;
217
218 /** The VM-exit interruption-information field. */
219 uint32_t uExitIntrInfo;
220 /** The VM-exit instruction-length field. */
221 uint32_t cbInstr;
222 /** The VM-exit instruction-information field. */
223 union
224 {
225 /** Plain unsigned int representation. */
226 uint32_t u;
227 /** INS and OUTS information. */
228 struct
229 {
230 uint32_t u6Reserved0 : 6;
231 /** The address size; 0=16-bit, 1=32-bit, 2=64-bit, rest undefined. */
232 uint32_t u3AddrSize : 3;
233 uint32_t u5Reserved1 : 5;
234 /** The segment register (X86_SREG_XXX). */
235 uint32_t iSegReg : 3;
236 uint32_t uReserved2 : 14;
237 } StrIo;
238 } ExitInstrInfo;
239 /** Whether the VM-entry failed or not. */
240 bool fVMEntryFailed;
241 /** Alignment. */
242 uint8_t abAlignment1[3];
243
244 /** The VM-entry interruption-information field. */
245 uint32_t uEntryIntrInfo;
246 /** The VM-entry exception error code field. */
247 uint32_t uEntryXcptErrorCode;
248 /** The VM-entry instruction length field. */
249 uint32_t cbEntryInstr;
250
251 /** IDT-vectoring information field. */
252 uint32_t uIdtVectoringInfo;
253 /** IDT-vectoring error code. */
254 uint32_t uIdtVectoringErrorCode;
255
256 /** Mask of currently read VMCS fields; HMVMX_UPDATED_TRANSIENT_*. */
257 uint32_t fVmcsFieldsRead;
258 /** Whether TSC-offsetting should be setup before VM-entry. */
259 bool fUpdateTscOffsettingAndPreemptTimer;
260 /** Whether the VM-exit was caused by a page-fault during delivery of a
261 * contributory exception or a page-fault. */
262 bool fVectoringPF;
263} VMXTRANSIENT;
264AssertCompileMemberAlignment(VMXTRANSIENT, uExitReason, sizeof(uint64_t));
265AssertCompileMemberAlignment(VMXTRANSIENT, uExitIntrInfo, sizeof(uint64_t));
266AssertCompileMemberAlignment(VMXTRANSIENT, uEntryIntrInfo, sizeof(uint64_t));
267AssertCompileMemberSize(VMXTRANSIENT, ExitInstrInfo, sizeof(uint32_t));
268/** Pointer to VMX transient state. */
269typedef VMXTRANSIENT *PVMXTRANSIENT;
270
271
272/**
273 * MSR-bitmap read permissions.
274 */
275typedef enum VMXMSREXITREAD
276{
277 /** Reading this MSR causes a VM-exit. */
278 VMXMSREXIT_INTERCEPT_READ = 0xb,
279 /** Reading this MSR does not cause a VM-exit. */
280 VMXMSREXIT_PASSTHRU_READ
281} VMXMSREXITREAD;
282
283/**
284 * MSR-bitmap write permissions.
285 */
286typedef enum VMXMSREXITWRITE
287{
288 /** Writing to this MSR causes a VM-exit. */
289 VMXMSREXIT_INTERCEPT_WRITE = 0xd,
290 /** Writing to this MSR does not cause a VM-exit. */
291 VMXMSREXIT_PASSTHRU_WRITE
292} VMXMSREXITWRITE;
293
294/**
295 * VM-exit handler.
296 *
297 * @returns VBox status code.
298 * @param pVCpu Pointer to the VMCPU.
299 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
300 * out-of-sync. Make sure to update the required
301 * fields before using them.
302 * @param pVmxTransient Pointer to the VMX-transient structure.
303 */
304#ifndef HMVMX_USE_FUNCTION_TABLE
305typedef int FNVMEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
306#else
307typedef DECLCALLBACK(int) FNVMEXITHANDLER(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
308/** Pointer to VM-exit handler. */
309typedef FNVMEXITHANDLER *PFNVMEXITHANDLER;
310#endif
311
312
313/*******************************************************************************
314* Internal Functions *
315*******************************************************************************/
316static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush);
317static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr);
318static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
319 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState);
320#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
321static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu);
322#endif
323#ifndef HMVMX_USE_FUNCTION_TABLE
324DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason);
325# define HMVMX_EXIT_DECL static int
326#else
327# define HMVMX_EXIT_DECL static DECLCALLBACK(int)
328#endif
329
330/** @name VM-exit handlers.
331 * @{
332 */
333static FNVMEXITHANDLER hmR0VmxExitXcptOrNmi;
334static FNVMEXITHANDLER hmR0VmxExitExtInt;
335static FNVMEXITHANDLER hmR0VmxExitTripleFault;
336static FNVMEXITHANDLER hmR0VmxExitInitSignal;
337static FNVMEXITHANDLER hmR0VmxExitSipi;
338static FNVMEXITHANDLER hmR0VmxExitIoSmi;
339static FNVMEXITHANDLER hmR0VmxExitSmi;
340static FNVMEXITHANDLER hmR0VmxExitIntWindow;
341static FNVMEXITHANDLER hmR0VmxExitNmiWindow;
342static FNVMEXITHANDLER hmR0VmxExitTaskSwitch;
343static FNVMEXITHANDLER hmR0VmxExitCpuid;
344static FNVMEXITHANDLER hmR0VmxExitGetsec;
345static FNVMEXITHANDLER hmR0VmxExitHlt;
346static FNVMEXITHANDLER hmR0VmxExitInvd;
347static FNVMEXITHANDLER hmR0VmxExitInvlpg;
348static FNVMEXITHANDLER hmR0VmxExitRdpmc;
349static FNVMEXITHANDLER hmR0VmxExitRdtsc;
350static FNVMEXITHANDLER hmR0VmxExitRsm;
351static FNVMEXITHANDLER hmR0VmxExitSetPendingXcptUD;
352static FNVMEXITHANDLER hmR0VmxExitMovCRx;
353static FNVMEXITHANDLER hmR0VmxExitMovDRx;
354static FNVMEXITHANDLER hmR0VmxExitIoInstr;
355static FNVMEXITHANDLER hmR0VmxExitRdmsr;
356static FNVMEXITHANDLER hmR0VmxExitWrmsr;
357static FNVMEXITHANDLER hmR0VmxExitErrInvalidGuestState;
358static FNVMEXITHANDLER hmR0VmxExitErrMsrLoad;
359static FNVMEXITHANDLER hmR0VmxExitErrUndefined;
360static FNVMEXITHANDLER hmR0VmxExitMwait;
361static FNVMEXITHANDLER hmR0VmxExitMtf;
362static FNVMEXITHANDLER hmR0VmxExitMonitor;
363static FNVMEXITHANDLER hmR0VmxExitPause;
364static FNVMEXITHANDLER hmR0VmxExitErrMachineCheck;
365static FNVMEXITHANDLER hmR0VmxExitTprBelowThreshold;
366static FNVMEXITHANDLER hmR0VmxExitApicAccess;
367static FNVMEXITHANDLER hmR0VmxExitXdtrAccess;
369static FNVMEXITHANDLER hmR0VmxExitEptViolation;
370static FNVMEXITHANDLER hmR0VmxExitEptMisconfig;
371static FNVMEXITHANDLER hmR0VmxExitRdtscp;
372static FNVMEXITHANDLER hmR0VmxExitPreemptTimer;
373static FNVMEXITHANDLER hmR0VmxExitWbinvd;
374static FNVMEXITHANDLER hmR0VmxExitXsetbv;
375static FNVMEXITHANDLER hmR0VmxExitRdrand;
376static FNVMEXITHANDLER hmR0VmxExitInvpcid;
377/** @} */
378
379static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
380static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
381static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
382static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
383static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
384static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
385static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient);
386static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx);
387
388/*******************************************************************************
389* Global Variables *
390*******************************************************************************/
391#ifdef HMVMX_USE_FUNCTION_TABLE
392
393/**
394 * VMX_EXIT dispatch table.
395 */
396static const PFNVMEXITHANDLER g_apfnVMExitHandlers[VMX_EXIT_MAX + 1] =
397{
398 /* 00 VMX_EXIT_XCPT_OR_NMI */ hmR0VmxExitXcptOrNmi,
399 /* 01 VMX_EXIT_EXT_INT */ hmR0VmxExitExtInt,
400 /* 02 VMX_EXIT_TRIPLE_FAULT */ hmR0VmxExitTripleFault,
401 /* 03 VMX_EXIT_INIT_SIGNAL */ hmR0VmxExitInitSignal,
402 /* 04 VMX_EXIT_SIPI */ hmR0VmxExitSipi,
403 /* 05 VMX_EXIT_IO_SMI */ hmR0VmxExitIoSmi,
404 /* 06 VMX_EXIT_SMI */ hmR0VmxExitSmi,
405 /* 07 VMX_EXIT_INT_WINDOW */ hmR0VmxExitIntWindow,
406 /* 08 VMX_EXIT_NMI_WINDOW */ hmR0VmxExitNmiWindow,
407 /* 09 VMX_EXIT_TASK_SWITCH */ hmR0VmxExitTaskSwitch,
408 /* 10 VMX_EXIT_CPUID */ hmR0VmxExitCpuid,
409 /* 11 VMX_EXIT_GETSEC */ hmR0VmxExitGetsec,
410 /* 12 VMX_EXIT_HLT */ hmR0VmxExitHlt,
411 /* 13 VMX_EXIT_INVD */ hmR0VmxExitInvd,
412 /* 14 VMX_EXIT_INVLPG */ hmR0VmxExitInvlpg,
413 /* 15 VMX_EXIT_RDPMC */ hmR0VmxExitRdpmc,
414 /* 16 VMX_EXIT_RDTSC */ hmR0VmxExitRdtsc,
415 /* 17 VMX_EXIT_RSM */ hmR0VmxExitRsm,
416 /* 18 VMX_EXIT_VMCALL */ hmR0VmxExitSetPendingXcptUD,
417 /* 19 VMX_EXIT_VMCLEAR */ hmR0VmxExitSetPendingXcptUD,
418 /* 20 VMX_EXIT_VMLAUNCH */ hmR0VmxExitSetPendingXcptUD,
419 /* 21 VMX_EXIT_VMPTRLD */ hmR0VmxExitSetPendingXcptUD,
420 /* 22 VMX_EXIT_VMPTRST */ hmR0VmxExitSetPendingXcptUD,
421 /* 23 VMX_EXIT_VMREAD */ hmR0VmxExitSetPendingXcptUD,
422 /* 24 VMX_EXIT_VMRESUME */ hmR0VmxExitSetPendingXcptUD,
423 /* 25 VMX_EXIT_VMWRITE */ hmR0VmxExitSetPendingXcptUD,
424 /* 26 VMX_EXIT_VMXOFF */ hmR0VmxExitSetPendingXcptUD,
425 /* 27 VMX_EXIT_VMXON */ hmR0VmxExitSetPendingXcptUD,
426 /* 28 VMX_EXIT_MOV_CRX */ hmR0VmxExitMovCRx,
427 /* 29 VMX_EXIT_MOV_DRX */ hmR0VmxExitMovDRx,
428 /* 30 VMX_EXIT_IO_INSTR */ hmR0VmxExitIoInstr,
429 /* 31 VMX_EXIT_RDMSR */ hmR0VmxExitRdmsr,
430 /* 32 VMX_EXIT_WRMSR */ hmR0VmxExitWrmsr,
431 /* 33 VMX_EXIT_ERR_INVALID_GUEST_STATE */ hmR0VmxExitErrInvalidGuestState,
432 /* 34 VMX_EXIT_ERR_MSR_LOAD */ hmR0VmxExitErrMsrLoad,
433 /* 35 UNDEFINED */ hmR0VmxExitErrUndefined,
434 /* 36 VMX_EXIT_MWAIT */ hmR0VmxExitMwait,
435 /* 37 VMX_EXIT_MTF */ hmR0VmxExitMtf,
436 /* 38 UNDEFINED */ hmR0VmxExitErrUndefined,
437 /* 39 VMX_EXIT_MONITOR */ hmR0VmxExitMonitor,
 438 /* 40 VMX_EXIT_PAUSE */ hmR0VmxExitPause,
 439 /* 41 VMX_EXIT_ERR_MACHINE_CHECK */ hmR0VmxExitErrMachineCheck,
 440 /* 42 UNDEFINED */ hmR0VmxExitErrUndefined,
441 /* 43 VMX_EXIT_TPR_BELOW_THRESHOLD */ hmR0VmxExitTprBelowThreshold,
442 /* 44 VMX_EXIT_APIC_ACCESS */ hmR0VmxExitApicAccess,
443 /* 45 UNDEFINED */ hmR0VmxExitErrUndefined,
444 /* 46 VMX_EXIT_XDTR_ACCESS */ hmR0VmxExitXdtrAccess,
445 /* 47 VMX_EXIT_TR_ACCESS */ hmR0VmxExitXdtrAccess,
446 /* 48 VMX_EXIT_EPT_VIOLATION */ hmR0VmxExitEptViolation,
447 /* 49 VMX_EXIT_EPT_MISCONFIG */ hmR0VmxExitEptMisconfig,
448 /* 50 VMX_EXIT_INVEPT */ hmR0VmxExitSetPendingXcptUD,
449 /* 51 VMX_EXIT_RDTSCP */ hmR0VmxExitRdtscp,
450 /* 52 VMX_EXIT_PREEMPT_TIMER */ hmR0VmxExitPreemptTimer,
451 /* 53 VMX_EXIT_INVVPID */ hmR0VmxExitSetPendingXcptUD,
452 /* 54 VMX_EXIT_WBINVD */ hmR0VmxExitWbinvd,
453 /* 55 VMX_EXIT_XSETBV */ hmR0VmxExitXsetbv,
454 /* 56 UNDEFINED */ hmR0VmxExitErrUndefined,
455 /* 57 VMX_EXIT_RDRAND */ hmR0VmxExitRdrand,
456 /* 58 VMX_EXIT_INVPCID */ hmR0VmxExitInvpcid,
457 /* 59 VMX_EXIT_VMFUNC */ hmR0VmxExitSetPendingXcptUD
458};
459#endif /* HMVMX_USE_FUNCTION_TABLE */
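/*
 * Illustrative dispatch sketch (assumed usage, not lifted from this spot):
 * with HMVMX_USE_FUNCTION_TABLE the basic exit reason indexes straight into
 * the table above, roughly:
 *
 *     AssertMsg(pVmxTransient->uExitReason <= VMX_EXIT_MAX, ("%#x\n", pVmxTransient->uExitReason));
 *     rc = g_apfnVMExitHandlers[pVmxTransient->uExitReason](pVCpu, pMixedCtx, pVmxTransient);
 *
 * Without the table, hmR0VmxHandleExit() switches on the same exit reason.
 */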
460
461#ifdef VBOX_STRICT
462static const char * const g_apszVmxInstrErrors[HMVMX_INSTR_ERROR_MAX + 1] =
463{
464 /* 0 */ "(Not Used)",
465 /* 1 */ "VMCALL executed in VMX root operation.",
466 /* 2 */ "VMCLEAR with invalid physical address.",
467 /* 3 */ "VMCLEAR with VMXON pointer.",
468 /* 4 */ "VMLAUNCH with non-clear VMCS.",
469 /* 5 */ "VMRESUME with non-launched VMCS.",
 470 /* 6 */ "VMRESUME after VMXOFF.",
471 /* 7 */ "VM entry with invalid control fields.",
472 /* 8 */ "VM entry with invalid host state fields.",
473 /* 9 */ "VMPTRLD with invalid physical address.",
474 /* 10 */ "VMPTRLD with VMXON pointer.",
475 /* 11 */ "VMPTRLD with incorrect revision identifier.",
476 /* 12 */ "VMREAD/VMWRITE from/to unsupported VMCS component.",
477 /* 13 */ "VMWRITE to read-only VMCS component.",
478 /* 14 */ "(Not Used)",
479 /* 15 */ "VMXON executed in VMX root operation.",
480 /* 16 */ "VM entry with invalid executive-VMCS pointer.",
 481 /* 17 */ "VM entry with non-launched executive VMCS.",
482 /* 18 */ "VM entry with executive-VMCS pointer not VMXON pointer.",
483 /* 19 */ "VMCALL with non-clear VMCS.",
484 /* 20 */ "VMCALL with invalid VM-exit control fields.",
485 /* 21 */ "(Not Used)",
486 /* 22 */ "VMCALL with incorrect MSEG revision identifier.",
487 /* 23 */ "VMXOFF under dual monitor treatment of SMIs and SMM.",
488 /* 24 */ "VMCALL with invalid SMM-monitor features.",
489 /* 25 */ "VM entry with invalid VM-execution control fields in executive VMCS.",
490 /* 26 */ "VM entry with events blocked by MOV SS.",
491 /* 27 */ "(Not Used)",
492 /* 28 */ "Invalid operand to INVEPT/INVVPID."
493};
494#endif /* VBOX_STRICT */
495
496
497
498/**
499 * Updates the VM's last error record. If there was a VMX instruction error,
500 * reads the error data from the VMCS and updates VCPU's last error record as
501 * well.
502 *
503 * @param pVM Pointer to the VM.
504 * @param pVCpu Pointer to the VMCPU (can be NULL if @a rc is not
505 * VERR_VMX_UNABLE_TO_START_VM or
506 * VERR_VMX_INVALID_VMCS_FIELD).
507 * @param rc The error code.
508 */
509static void hmR0VmxUpdateErrorRecord(PVM pVM, PVMCPU pVCpu, int rc)
510{
511 AssertPtr(pVM);
512 if ( rc == VERR_VMX_INVALID_VMCS_FIELD
513 || rc == VERR_VMX_UNABLE_TO_START_VM)
514 {
515 AssertPtrReturnVoid(pVCpu);
516 VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
517 }
518 pVM->hm.s.lLastError = rc;
519}
520
521
522/**
523 * Reads the VM-entry interruption-information field from the VMCS into the VMX
524 * transient structure.
525 *
526 * @returns VBox status code.
527 * @param pVmxTransient Pointer to the VMX transient structure.
528 *
529 * @remarks No-long-jump zone!!!
530 */
531DECLINLINE(int) hmR0VmxReadEntryIntrInfoVmcs(PVMXTRANSIENT pVmxTransient)
532{
533 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &pVmxTransient->uEntryIntrInfo);
534 AssertRCReturn(rc, rc);
535 return VINF_SUCCESS;
536}
537
538
539/**
540 * Reads the VM-entry exception error code field from the VMCS into
541 * the VMX transient structure.
542 *
543 * @returns VBox status code.
544 * @param pVmxTransient Pointer to the VMX transient structure.
545 *
546 * @remarks No-long-jump zone!!!
547 */
548DECLINLINE(int) hmR0VmxReadEntryXcptErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
549{
550 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &pVmxTransient->uEntryXcptErrorCode);
551 AssertRCReturn(rc, rc);
552 return VINF_SUCCESS;
553}
554
555
 556/**
 557 * Reads the VM-entry instruction length field from the VMCS into
 558 * the VMX transient structure.
559 *
560 * @returns VBox status code.
561 * @param pVCpu Pointer to the VMCPU.
562 * @param pVmxTransient Pointer to the VMX transient structure.
563 *
564 * @remarks No-long-jump zone!!!
565 */
566DECLINLINE(int) hmR0VmxReadEntryInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
567{
568 int rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &pVmxTransient->cbEntryInstr);
569 AssertRCReturn(rc, rc);
570 return VINF_SUCCESS;
571}
572
573
574/**
575 * Reads the VM-exit interruption-information field from the VMCS into the VMX
576 * transient structure.
577 *
578 * @returns VBox status code.
579 * @param pVCpu Pointer to the VMCPU.
580 * @param pVmxTransient Pointer to the VMX transient structure.
581 */
582DECLINLINE(int) hmR0VmxReadExitIntrInfoVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
583{
584 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO))
585 {
586 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO, &pVmxTransient->uExitIntrInfo);
587 AssertRCReturn(rc, rc);
588 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO;
589 }
590 return VINF_SUCCESS;
591}
592
593
594/**
595 * Reads the VM-exit interruption error code from the VMCS into the VMX
596 * transient structure.
597 *
598 * @returns VBox status code.
599 * @param pVCpu Pointer to the VMCPU.
600 * @param pVmxTransient Pointer to the VMX transient structure.
601 */
602DECLINLINE(int) hmR0VmxReadExitIntrErrorCodeVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
603{
604 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE))
605 {
606 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE, &pVmxTransient->uExitIntrErrorCode);
607 AssertRCReturn(rc, rc);
608 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_ERROR_CODE;
609 }
610 return VINF_SUCCESS;
611}
612
613
614/**
615 * Reads the VM-exit instruction length field from the VMCS into the VMX
616 * transient structure.
617 *
618 * @returns VBox status code.
619 * @param pVCpu Pointer to the VMCPU.
620 * @param pVmxTransient Pointer to the VMX transient structure.
621 */
622DECLINLINE(int) hmR0VmxReadExitInstrLenVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
623{
624 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN))
625 {
626 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &pVmxTransient->cbInstr);
627 AssertRCReturn(rc, rc);
628 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_LEN;
629 }
630 return VINF_SUCCESS;
631}
632
633
634/**
635 * Reads the VM-exit instruction-information field from the VMCS into
636 * the VMX transient structure.
637 *
638 * @returns VBox status code.
639 * @param pVCpu The cross context per CPU structure.
640 * @param pVmxTransient Pointer to the VMX transient structure.
641 */
 642DECLINLINE(int) hmR0VmxReadExitInstrInfoVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
 643{
 644 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO))
 645 {
 646 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_INSTR_INFO, &pVmxTransient->ExitInstrInfo.u);
 647 AssertRCReturn(rc, rc);
 648 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_INSTR_INFO;
 649 }
 650 return VINF_SUCCESS;
 651}
652
653
654/**
655 * Reads the exit qualification from the VMCS into the VMX transient structure.
656 *
657 * @returns VBox status code.
658 * @param pVCpu Pointer to the VMCPU.
659 * @param pVmxTransient Pointer to the VMX transient structure.
660 */
661DECLINLINE(int) hmR0VmxReadExitQualificationVmcs(PVMCPU pVCpu, PVMXTRANSIENT pVmxTransient)
662{
663 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION))
664 {
665 int rc = VMXReadVmcsGstN(VMX_VMCS_RO_EXIT_QUALIFICATION, &pVmxTransient->uExitQualification);
666 AssertRCReturn(rc, rc);
667 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_EXIT_QUALIFICATION;
668 }
669 return VINF_SUCCESS;
670}
671
672
673/**
674 * Reads the IDT-vectoring information field from the VMCS into the VMX
675 * transient structure.
676 *
677 * @returns VBox status code.
678 * @param pVmxTransient Pointer to the VMX transient structure.
679 *
680 * @remarks No-long-jump zone!!!
681 */
682DECLINLINE(int) hmR0VmxReadIdtVectoringInfoVmcs(PVMXTRANSIENT pVmxTransient)
683{
684 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO))
685 {
686 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_INFO, &pVmxTransient->uIdtVectoringInfo);
687 AssertRCReturn(rc, rc);
688 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_INFO;
689 }
690 return VINF_SUCCESS;
691}
692
693
694/**
695 * Reads the IDT-vectoring error code from the VMCS into the VMX
696 * transient structure.
697 *
698 * @returns VBox status code.
699 * @param pVmxTransient Pointer to the VMX transient structure.
700 */
701DECLINLINE(int) hmR0VmxReadIdtVectoringErrorCodeVmcs(PVMXTRANSIENT pVmxTransient)
702{
703 if (!(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE))
704 {
705 int rc = VMXReadVmcs32(VMX_VMCS32_RO_IDT_ERROR_CODE, &pVmxTransient->uIdtVectoringErrorCode);
706 AssertRCReturn(rc, rc);
707 pVmxTransient->fVmcsFieldsRead |= HMVMX_UPDATED_TRANSIENT_IDT_VECTORING_ERROR_CODE;
708 }
709 return VINF_SUCCESS;
710}
711
712
713/**
714 * Enters VMX root mode operation on the current CPU.
715 *
716 * @returns VBox status code.
 717 * @param pVM Pointer to the VM (optional, can be NULL after
 718 * a resume).
719 * @param HCPhysCpuPage Physical address of the VMXON region.
720 * @param pvCpuPage Pointer to the VMXON region.
721 */
722static int hmR0VmxEnterRootMode(PVM pVM, RTHCPHYS HCPhysCpuPage, void *pvCpuPage)
723{
724 AssertReturn(HCPhysCpuPage != 0 && HCPhysCpuPage != NIL_RTHCPHYS, VERR_INVALID_PARAMETER);
725 AssertReturn(pvCpuPage, VERR_INVALID_PARAMETER);
726 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
727
728 if (pVM)
729 {
730 /* Write the VMCS revision dword to the VMXON region. */
731 *(uint32_t *)pvCpuPage = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
732 }
733
734 /* Enable the VMX bit in CR4 if necessary. */
735 RTCCUINTREG uCr4 = ASMGetCR4();
736 if (!(uCr4 & X86_CR4_VMXE))
737 ASMSetCR4(uCr4 | X86_CR4_VMXE);
738
739 /* Enter VMX root mode. */
740 int rc = VMXEnable(HCPhysCpuPage);
741 if (RT_FAILURE(rc))
742 ASMSetCR4(uCr4);
743
744 return rc;
745}
746
747
748/**
749 * Exits VMX root mode operation on the current CPU.
750 *
751 * @returns VBox status code.
752 */
753static int hmR0VmxLeaveRootMode(void)
754{
755 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
756
757 /* If we're for some reason not in VMX root mode, then don't leave it. */
758 RTCCUINTREG uHostCR4 = ASMGetCR4();
759 if (uHostCR4 & X86_CR4_VMXE)
760 {
761 /* Exit VMX root mode and clear the VMX bit in CR4. */
762 VMXDisable();
763 ASMSetCR4(uHostCR4 & ~X86_CR4_VMXE);
764 return VINF_SUCCESS;
765 }
766
767 return VERR_VMX_NOT_IN_VMX_ROOT_MODE;
768}
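/*
 * Illustrative pairing sketch (simplified assumption): a CPU is brought into
 * and out of VMX root mode around its use of VMX instructions, with
 * preemption disabled for the whole window, roughly:
 *
 *     int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
 *     if (RT_SUCCESS(rc))
 *     {
 *         // ... VMXON is now in effect; VMCS operations are legal here ...
 *         hmR0VmxLeaveRootMode();
 *     }
 */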
769
770
771/**
772 * Allocates and maps one physically contiguous page. The allocated page is
 773 * zeroed out (used by various VT-x structures).
774 *
775 * @returns IPRT status code.
776 * @param pMemObj Pointer to the ring-0 memory object.
777 * @param ppVirt Where to store the virtual address of the
778 * allocation.
 779 * @param pHCPhys Where to store the physical address of the
780 * allocation.
781 */
782DECLINLINE(int) hmR0VmxPageAllocZ(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
783{
784 AssertPtrReturn(pMemObj, VERR_INVALID_PARAMETER);
785 AssertPtrReturn(ppVirt, VERR_INVALID_PARAMETER);
786 AssertPtrReturn(pHCPhys, VERR_INVALID_PARAMETER);
787
788 int rc = RTR0MemObjAllocCont(pMemObj, PAGE_SIZE, false /* fExecutable */);
789 if (RT_FAILURE(rc))
790 return rc;
791 *ppVirt = RTR0MemObjAddress(*pMemObj);
792 *pHCPhys = RTR0MemObjGetPagePhysAddr(*pMemObj, 0 /* iPage */);
793 ASMMemZero32(*ppVirt, PAGE_SIZE);
794 return VINF_SUCCESS;
795}
796
797
798/**
799 * Frees and unmaps an allocated physical page.
800 *
801 * @param pMemObj Pointer to the ring-0 memory object.
802 * @param ppVirt Where to re-initialize the virtual address of
 803 * the allocation as 0.
804 * @param pHCPhys Where to re-initialize the physical address of the
805 * allocation as 0.
806 */
807DECLINLINE(void) hmR0VmxPageFree(PRTR0MEMOBJ pMemObj, PRTR0PTR ppVirt, PRTHCPHYS pHCPhys)
808{
809 AssertPtr(pMemObj);
810 AssertPtr(ppVirt);
811 AssertPtr(pHCPhys);
812 if (*pMemObj != NIL_RTR0MEMOBJ)
813 {
814 int rc = RTR0MemObjFree(*pMemObj, true /* fFreeMappings */);
815 AssertRC(rc);
816 *pMemObj = NIL_RTR0MEMOBJ;
817 *ppVirt = 0;
818 *pHCPhys = 0;
819 }
820}
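/*
 * Illustrative usage sketch (assumption, mirroring the call sites in
 * hmR0VmxStructsAlloc/hmR0VmxStructsFree below): every page allocated with
 * hmR0VmxPageAllocZ() is later released via hmR0VmxPageFree() on the same
 * (memory object, virtual, physical) triple:
 *
 *     RTR0MEMOBJ hMemObj    = NIL_RTR0MEMOBJ;
 *     RTR0PTR    pvPage     = 0;
 *     RTHCPHYS   HCPhysPage = 0;
 *     int rc = hmR0VmxPageAllocZ(&hMemObj, &pvPage, &HCPhysPage);
 *     if (RT_SUCCESS(rc))
 *         hmR0VmxPageFree(&hMemObj, &pvPage, &HCPhysPage);
 */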
821
822
823/**
824 * Worker function to free VT-x related structures.
825 *
826 * @returns IPRT status code.
827 * @param pVM Pointer to the VM.
828 */
829static void hmR0VmxStructsFree(PVM pVM)
830{
831 for (VMCPUID i = 0; i < pVM->cCpus; i++)
832 {
833 PVMCPU pVCpu = &pVM->aCpus[i];
834 AssertPtr(pVCpu);
835
836#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
837 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
838 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
839#endif
840
841 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
842 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap, &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
843
844 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic, &pVCpu->hm.s.vmx.HCPhysVirtApic);
845 hmR0VmxPageFree(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
846 }
847
848 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess, &pVM->hm.s.vmx.HCPhysApicAccess);
849#ifdef VBOX_WITH_CRASHDUMP_MAGIC
850 hmR0VmxPageFree(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
851#endif
852}
853
854
855/**
856 * Worker function to allocate VT-x related VM structures.
857 *
858 * @returns IPRT status code.
859 * @param pVM Pointer to the VM.
860 */
861static int hmR0VmxStructsAlloc(PVM pVM)
862{
863 /*
864 * Initialize members up-front so we can cleanup properly on allocation failure.
865 */
866#define VMXLOCAL_INIT_VM_MEMOBJ(a_Name, a_VirtPrefix) \
867 pVM->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
868 pVM->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
869 pVM->hm.s.vmx.HCPhys##a_Name = 0;
870
871#define VMXLOCAL_INIT_VMCPU_MEMOBJ(a_Name, a_VirtPrefix) \
872 pVCpu->hm.s.vmx.hMemObj##a_Name = NIL_RTR0MEMOBJ; \
873 pVCpu->hm.s.vmx.a_VirtPrefix##a_Name = 0; \
874 pVCpu->hm.s.vmx.HCPhys##a_Name = 0;
875
876#ifdef VBOX_WITH_CRASHDUMP_MAGIC
877 VMXLOCAL_INIT_VM_MEMOBJ(Scratch, pv);
878#endif
879 VMXLOCAL_INIT_VM_MEMOBJ(ApicAccess, pb);
880
881 AssertCompile(sizeof(VMCPUID) == sizeof(pVM->cCpus));
882 for (VMCPUID i = 0; i < pVM->cCpus; i++)
883 {
884 PVMCPU pVCpu = &pVM->aCpus[i];
885 VMXLOCAL_INIT_VMCPU_MEMOBJ(Vmcs, pv);
886 VMXLOCAL_INIT_VMCPU_MEMOBJ(VirtApic, pb);
887 VMXLOCAL_INIT_VMCPU_MEMOBJ(MsrBitmap, pv);
888#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
889 VMXLOCAL_INIT_VMCPU_MEMOBJ(GuestMsr, pv);
890 VMXLOCAL_INIT_VMCPU_MEMOBJ(HostMsr, pv);
891#endif
892 }
893#undef VMXLOCAL_INIT_VMCPU_MEMOBJ
894#undef VMXLOCAL_INIT_VM_MEMOBJ
895
896 /*
897 * Allocate all the VT-x structures.
898 */
899 int rc = VINF_SUCCESS;
900#ifdef VBOX_WITH_CRASHDUMP_MAGIC
901 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjScratch, &pVM->hm.s.vmx.pbScratch, &pVM->hm.s.vmx.HCPhysScratch);
902 if (RT_FAILURE(rc))
903 goto cleanup;
904 strcpy((char *)pVM->hm.s.vmx.pbScratch, "SCRATCH Magic");
905 *(uint64_t *)(pVM->hm.s.vmx.pbScratch + 16) = UINT64_C(0xdeadbeefdeadbeef);
906#endif
907
908 /* Allocate the APIC-access page for trapping APIC accesses from the guest. */
909 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
910 {
911 rc = hmR0VmxPageAllocZ(&pVM->hm.s.vmx.hMemObjApicAccess, (PRTR0PTR)&pVM->hm.s.vmx.pbApicAccess,
912 &pVM->hm.s.vmx.HCPhysApicAccess);
913 if (RT_FAILURE(rc))
914 goto cleanup;
915 }
916
917 /*
918 * Initialize per-VCPU VT-x structures.
919 */
920 for (VMCPUID i = 0; i < pVM->cCpus; i++)
921 {
922 PVMCPU pVCpu = &pVM->aCpus[i];
923 AssertPtr(pVCpu);
924
925 /* Allocate the VM control structure (VMCS). */
926 AssertReturn(MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(pVM->hm.s.vmx.msr.vmx_basic_info) <= PAGE_SIZE, VERR_INTERNAL_ERROR);
927 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVmcs, &pVCpu->hm.s.vmx.pvVmcs, &pVCpu->hm.s.vmx.HCPhysVmcs);
928 if (RT_FAILURE(rc))
929 goto cleanup;
930
931 /* Allocate the Virtual-APIC page for transparent TPR accesses. */
932 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
933 {
934 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjVirtApic, (PRTR0PTR)&pVCpu->hm.s.vmx.pbVirtApic,
935 &pVCpu->hm.s.vmx.HCPhysVirtApic);
936 if (RT_FAILURE(rc))
937 goto cleanup;
938 }
939
940 /* Allocate the MSR-bitmap if supported by the CPU. The MSR-bitmap is for transparent accesses of specific MSRs. */
941 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
942 {
943 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjMsrBitmap, &pVCpu->hm.s.vmx.pvMsrBitmap,
944 &pVCpu->hm.s.vmx.HCPhysMsrBitmap);
945 if (RT_FAILURE(rc))
946 goto cleanup;
947 memset(pVCpu->hm.s.vmx.pvMsrBitmap, 0xff, PAGE_SIZE);
948 }
949
950#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
951 /* Allocate the VM-entry MSR-load and VM-exit MSR-store page for the guest MSRs. */
952 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjGuestMsr, &pVCpu->hm.s.vmx.pvGuestMsr, &pVCpu->hm.s.vmx.HCPhysGuestMsr);
953 if (RT_FAILURE(rc))
954 goto cleanup;
955
956 /* Allocate the VM-exit MSR-load page for the host MSRs. */
957 rc = hmR0VmxPageAllocZ(&pVCpu->hm.s.vmx.hMemObjHostMsr, &pVCpu->hm.s.vmx.pvHostMsr, &pVCpu->hm.s.vmx.HCPhysHostMsr);
958 if (RT_FAILURE(rc))
959 goto cleanup;
960#endif
961 }
962
963 return VINF_SUCCESS;
964
965cleanup:
966 hmR0VmxStructsFree(pVM);
967 return rc;
968}
969
970
971/**
972 * Does global VT-x initialization (called during module initialization).
973 *
974 * @returns VBox status code.
975 */
976VMMR0DECL(int) VMXR0GlobalInit(void)
977{
978#ifdef HMVMX_USE_FUNCTION_TABLE
979 AssertCompile(VMX_EXIT_MAX + 1 == RT_ELEMENTS(g_apfnVMExitHandlers));
980# ifdef VBOX_STRICT
981 for (unsigned i = 0; i < RT_ELEMENTS(g_apfnVMExitHandlers); i++)
982 Assert(g_apfnVMExitHandlers[i]);
983# endif
984#endif
985 return VINF_SUCCESS;
986}
987
988
989/**
990 * Does global VT-x termination (called during module termination).
991 */
992VMMR0DECL(void) VMXR0GlobalTerm()
993{
994 /* Nothing to do currently. */
995}
996
997
998/**
999 * Sets up and activates VT-x on the current CPU.
1000 *
1001 * @returns VBox status code.
1002 * @param pCpu Pointer to the global CPU info struct.
1003 * @param pVM Pointer to the VM (can be NULL after a host resume
1004 * operation).
1005 * @param pvCpuPage Pointer to the VMXON region (can be NULL if @a
1006 * fEnabledByHost is true).
1007 * @param HCPhysCpuPage Physical address of the VMXON region (can be 0 if
1008 * @a fEnabledByHost is true).
1009 * @param fEnabledByHost Set if SUPR0EnableVTx() or similar was used to
1010 * enable VT-x on the host.
1011 */
1012VMMR0DECL(int) VMXR0EnableCpu(PHMGLOBALCPUINFO pCpu, PVM pVM, void *pvCpuPage, RTHCPHYS HCPhysCpuPage, bool fEnabledByHost)
1013{
1014 AssertReturn(pCpu, VERR_INVALID_PARAMETER);
1015 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1016
1017 if (!fEnabledByHost)
1018 {
1019 int rc = hmR0VmxEnterRootMode(pVM, HCPhysCpuPage, pvCpuPage);
1020 if (RT_FAILURE(rc))
1021 return rc;
1022 }
1023
1024 /*
 1025 * Flush all EPTP tagged-TLB entries (in case any other hypervisor has been using EPTPs) so that
1026 * we can avoid an explicit flush while using new VPIDs. We would still need to flush
1027 * each time while reusing a VPID after hitting the MaxASID limit once.
1028 */
1029 if ( pVM
1030 && pVM->hm.s.fNestedPaging)
1031 {
1032 /* We require ALL_CONTEXT flush-type to be available on the CPU. See hmR0VmxSetupTaggedTlb(). */
1033 Assert(pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS);
1034 hmR0VmxFlushEpt(pVM, NULL /* pVCpu */, VMX_FLUSH_EPT_ALL_CONTEXTS);
1035 pCpu->fFlushAsidBeforeUse = false;
1036 }
1037 else
1038 {
1039 /** @todo This is still not perfect. If on host resume (pVM is NULL or a VM
1040 * without Nested Paging triggered this function) we still have the risk
1041 * of potentially running with stale TLB-entries from other hypervisors
1042 * when later we use a VM with NestedPaging. To fix this properly we will
1043 * have to pass '&g_HvmR0' (see HMR0.cpp) to this function and read
1044 * 'vmx_ept_vpid_caps' from it. Sigh. */
1045 pCpu->fFlushAsidBeforeUse = true;
1046 }
1047
1048 /* Ensure each VCPU scheduled on this CPU gets a new VPID on resume. See @bugref{6255}. */
1049 ++pCpu->cTlbFlushes;
1050
1051 return VINF_SUCCESS;
1052}
1053
1054
1055/**
1056 * Deactivates VT-x on the current CPU.
1057 *
1058 * @returns VBox status code.
1059 * @param pCpu Pointer to the global CPU info struct.
1060 * @param pvCpuPage Pointer to the VMXON region.
1061 * @param HCPhysCpuPage Physical address of the VMXON region.
1062 *
1063 * @remarks This function should never be called when SUPR0EnableVTx() or
1064 * similar was used to enable VT-x on the host.
1065 */
1066VMMR0DECL(int) VMXR0DisableCpu(PHMGLOBALCPUINFO pCpu, void *pvCpuPage, RTHCPHYS HCPhysCpuPage)
1067{
1068 NOREF(pCpu);
1069 NOREF(pvCpuPage);
1070 NOREF(HCPhysCpuPage);
1071
1072 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
1073 return hmR0VmxLeaveRootMode();
1074}
1075
1076
1077/**
1078 * Sets the permission bits for the specified MSR in the MSR bitmap.
1079 *
1080 * @param pVCpu Pointer to the VMCPU.
 1081 * @param uMsr The MSR value.
1082 * @param enmRead Whether reading this MSR causes a VM-exit.
1083 * @param enmWrite Whether writing this MSR causes a VM-exit.
1084 */
1085static void hmR0VmxSetMsrPermission(PVMCPU pVCpu, uint32_t uMsr, VMXMSREXITREAD enmRead, VMXMSREXITWRITE enmWrite)
1086{
1087 int32_t iBit;
1088 uint8_t *pbMsrBitmap = (uint8_t *)pVCpu->hm.s.vmx.pvMsrBitmap;
1089
1090 /*
1091 * Layout:
1092 * 0x000 - 0x3ff - Low MSR read bits
1093 * 0x400 - 0x7ff - High MSR read bits
1094 * 0x800 - 0xbff - Low MSR write bits
1095 * 0xc00 - 0xfff - High MSR write bits
1096 */
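    /*
     * Worked example (illustrative): uMsr = 0xC0000080 (MSR_K6_EFER) is in the
     * high range, so iBit = 0x80 and pbMsrBitmap advances by 0x400. Bit 0x80 of
     * the high-read region lands in byte 0x410 of the bitmap, and the matching
     * write bit sits 0x800 bytes further, in byte 0xC10, matching the layout
     * above.
     */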
1097 if (uMsr <= 0x00001FFF)
1098 iBit = uMsr;
1099 else if ( uMsr >= 0xC0000000
1100 && uMsr <= 0xC0001FFF)
1101 {
1102 iBit = (uMsr - 0xC0000000);
1103 pbMsrBitmap += 0x400;
1104 }
1105 else
1106 {
1107 AssertMsgFailed(("hmR0VmxSetMsrPermission: Invalid MSR %#RX32\n", uMsr));
1108 return;
1109 }
1110
1111 Assert(iBit <= 0x1fff);
1112 if (enmRead == VMXMSREXIT_INTERCEPT_READ)
1113 ASMBitSet(pbMsrBitmap, iBit);
1114 else
1115 ASMBitClear(pbMsrBitmap, iBit);
1116
1117 if (enmWrite == VMXMSREXIT_INTERCEPT_WRITE)
1118 ASMBitSet(pbMsrBitmap + 0x800, iBit);
1119 else
1120 ASMBitClear(pbMsrBitmap + 0x800, iBit);
1121}
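/*
 * Illustrative usage sketch (hypothetical call; the real call sites are in
 * the MSR setup code elsewhere in this file): letting the guest read
 * SYSENTER_CS directly while still intercepting writes would look like:
 *
 *     hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS,
 *                             VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_INTERCEPT_WRITE);
 */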
1122
1123
1124/**
1125 * Flushes the TLB using EPT.
1126 *
1128 * @param pVM Pointer to the VM.
1129 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1130 * enmFlush).
1131 * @param enmFlush Type of flush.
1132 */
1133static void hmR0VmxFlushEpt(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_EPT enmFlush)
1134{
1135 AssertPtr(pVM);
1136 Assert(pVM->hm.s.fNestedPaging);
1137
1138 uint64_t descriptor[2];
1139 if (enmFlush == VMX_FLUSH_EPT_ALL_CONTEXTS)
1140 descriptor[0] = 0;
1141 else
1142 {
1143 Assert(pVCpu);
1144 descriptor[0] = pVCpu->hm.s.vmx.HCPhysEPTP;
1145 }
1146 descriptor[1] = 0; /* MBZ. Intel spec. 33.3 "VMX Instructions" */
1147
1148 int rc = VMXR0InvEPT(enmFlush, &descriptor[0]);
1149 AssertMsg(rc == VINF_SUCCESS, ("VMXR0InvEPT %#x %RGv failed with %Rrc\n", enmFlush, pVCpu ? pVCpu->hm.s.vmx.HCPhysEPTP : 0,
1150 rc));
1151 if ( RT_SUCCESS(rc)
1152 && pVCpu)
1153 {
1154 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushNestedPaging);
1155 }
1156}
1157
1158
1159/**
1160 * Flushes the TLB using VPID.
1161 *
1163 * @param pVM Pointer to the VM.
1164 * @param pVCpu Pointer to the VMCPU (can be NULL depending on @a
1165 * enmFlush).
1166 * @param enmFlush Type of flush.
1167 * @param GCPtr Virtual address of the page to flush (can be 0 depending
1168 * on @a enmFlush).
1169 */
1170static void hmR0VmxFlushVpid(PVM pVM, PVMCPU pVCpu, VMX_FLUSH_VPID enmFlush, RTGCPTR GCPtr)
1171{
1172 AssertPtr(pVM);
1173 Assert(pVM->hm.s.vmx.fVpid);
1174
1175 uint64_t descriptor[2];
1176 if (enmFlush == VMX_FLUSH_VPID_ALL_CONTEXTS)
1177 {
1178 descriptor[0] = 0;
1179 descriptor[1] = 0;
1180 }
1181 else
1182 {
1183 AssertPtr(pVCpu);
1184 AssertMsg(pVCpu->hm.s.uCurrentAsid != 0, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1185 AssertMsg(pVCpu->hm.s.uCurrentAsid <= UINT16_MAX, ("VMXR0InvVPID: invalid ASID %lu\n", pVCpu->hm.s.uCurrentAsid));
1186 descriptor[0] = pVCpu->hm.s.uCurrentAsid;
1187 descriptor[1] = GCPtr;
1188 }
1189
1190 int rc = VMXR0InvVPID(enmFlush, &descriptor[0]); NOREF(rc);
1191 AssertMsg(rc == VINF_SUCCESS,
1192 ("VMXR0InvVPID %#x %u %RGv failed with %d\n", enmFlush, pVCpu ? pVCpu->hm.s.uCurrentAsid : 0, GCPtr, rc));
1193 if ( RT_SUCCESS(rc)
1194 && pVCpu)
1195 {
1196 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushAsid);
1197 }
1198}
1199
1200
1201/**
1202 * Invalidates a guest page by guest virtual address. Only relevant for
1203 * EPT/VPID, otherwise there is nothing really to invalidate.
1204 *
1205 * @returns VBox status code.
1206 * @param pVM Pointer to the VM.
1207 * @param pVCpu Pointer to the VMCPU.
1208 * @param GCVirt Guest virtual address of the page to invalidate.
1209 */
1210VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt)
1211{
1212 AssertPtr(pVM);
1213 AssertPtr(pVCpu);
1214 LogFlowFunc(("pVM=%p pVCpu=%p GCVirt=%RGv\n", pVM, pVCpu, GCVirt));
1215
1216 bool fFlushPending = VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1217 if (!fFlushPending)
1218 {
1219 /*
 1220 * We must invalidate the guest TLB entry in either case; we cannot ignore it even for the EPT case.
 1221 * See @bugref{6043} and @bugref{6177}.
 1222 *
 1223 * Set the VMCPU_FF_TLB_FLUSH force flag and flush before VM-entry in hmR0VmxFlushTLB*() as this
 1224 * function may be called in a loop with individual addresses.
1225 */
1226 if (pVM->hm.s.vmx.fVpid)
1227 {
1228 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1229 {
1230 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, GCVirt);
1231 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgVirt);
1232 }
1233 else
1234 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1235 }
1236 else if (pVM->hm.s.fNestedPaging)
1237 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1238 }
1239
1240 return VINF_SUCCESS;
1241}
1242
1243
1244/**
1245 * Invalidates a guest page by physical address. Only relevant for EPT/VPID,
1246 * otherwise there is nothing really to invalidate.
1247 *
1248 * @returns VBox status code.
1249 * @param pVM Pointer to the VM.
1250 * @param pVCpu Pointer to the VMCPU.
1251 * @param GCPhys Guest physical address of the page to invalidate.
1252 */
1253VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys)
1254{
1255 LogFlowFunc(("%RGp\n", GCPhys));
1256
1257 /*
 1258 * We cannot flush a page by guest-physical address: invvpid takes only a linear address, while invept flushes
 1259 * only by EPT, not by individual addresses. We update the force flag here and flush before the next VM-entry in hmR0VmxFlushTLB*().
 1260 * This function might be called in a loop; the pending flush should cause a flush-by-EPT if EPT is in use. See @bugref{6568}.
1261 */
1262 VMCPU_FF_SET(pVCpu, VMCPU_FF_TLB_FLUSH);
1263 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbInvlpgPhys);
1264 return VINF_SUCCESS;
1265}
1266
1267
1268/**
1269 * Dummy placeholder for tagged-TLB flush handling before VM-entry. Used in the
1270 * case where neither EPT nor VPID is supported by the CPU.
1271 *
1272 * @param pVM Pointer to the VM.
1273 * @param pVCpu Pointer to the VMCPU.
1274 *
1275 * @remarks Called with interrupts disabled.
1276 */
1277static void hmR0VmxFlushTaggedTlbNone(PVM pVM, PVMCPU pVCpu)
1278{
1279 NOREF(pVM);
1280 AssertPtr(pVCpu);
1281 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH);
1282 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1283
1284 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
1285 AssertPtr(pCpu);
1286
1287 pVCpu->hm.s.TlbShootdown.cPages = 0;
1288 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1289 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1290 pVCpu->hm.s.fForceTLBFlush = false;
1291 return;
1292}
1293
1294
1295/**
1296 * Flushes the tagged-TLB entries for EPT+VPID CPUs as necessary.
1297 *
1298 * @param pVM Pointer to the VM.
1299 * @param pVCpu Pointer to the VMCPU.
 1300 * @remarks All references to "ASID" in this function pertain to "VPID" in
 1301 * Intel's nomenclature. The reason is to avoid confusion in compare
 1302 * statements, since the host-CPU copies are named "ASID".
1303 *
1304 * @remarks Called with interrupts disabled.
1305 */
1306static void hmR0VmxFlushTaggedTlbBoth(PVM pVM, PVMCPU pVCpu)
1307{
1308#ifdef VBOX_WITH_STATISTICS
1309 bool fTlbFlushed = false;
1310# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { fTlbFlushed = true; } while (0)
1311# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { \
1312 if (!fTlbFlushed) \
1313 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch); \
1314 } while (0)
1315#else
1316# define HMVMX_SET_TAGGED_TLB_FLUSHED() do { } while (0)
1317# define HMVMX_UPDATE_FLUSH_SKIPPED_STAT() do { } while (0)
1318#endif
1319
1320 AssertPtr(pVM);
1321 AssertPtr(pVCpu);
1322 AssertMsg(pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid,
1323 ("hmR0VmxFlushTaggedTlbBoth cannot be invoked unless NestedPaging & VPID are enabled."
1324 "fNestedPaging=%RTbool fVpid=%RTbool", pVM->hm.s.fNestedPaging, pVM->hm.s.vmx.fVpid));
1325
1326 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
1327 AssertPtr(pCpu);
1328
1329 /*
1330 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1331 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1332 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1333 */
1334 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1335 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1336 {
1337 ++pCpu->uCurrentAsid;
1338 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1339 {
1340 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0. */
1341 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1342 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1343 }
1344
1345 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1346 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1347 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1348
1349 /*
1350 * Flush by EPT when we get rescheduled to a new host CPU to ensure EPT-only tagged mappings are also
1351 * invalidated. We don't need to flush-by-VPID here as flushing by EPT covers it. See @bugref{6568}.
1352 */
1353 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1354 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1355 HMVMX_SET_TAGGED_TLB_FLUSHED();
1356 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH); /* Already flushed-by-EPT, skip doing it again below. */
1357 }
1358
1359 /* Check for explicit TLB shootdowns. */
1360 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1361 {
1362 /*
1363 * Changes to the EPT paging structure by VMM requires flushing by EPT as the CPU creates
1364 * guest-physical (only EPT-tagged) mappings while traversing the EPT tables when EPT is in use.
1365 * Flushing by VPID will only flush linear (only VPID-tagged) and combined (EPT+VPID tagged) mappings
1366 * but not guest-physical mappings.
1367 * See Intel spec. 28.3.2 "Creating and Using Cached Translation Information". See @bugref{6568}.
1368 */
1369 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1370 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1371 HMVMX_SET_TAGGED_TLB_FLUSHED();
1372 }
1373
1374 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1375 * not be executed. See hmQueueInvlPage() where it is commented
1376 * out. Support individual entry flushing someday. */
1377 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1378 {
1379 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1380
1381 /*
1382 * Flush individual guest entries using VPID from the TLB or as little as possible with EPT
1383 * as supported by the CPU.
1384 */
1385 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1386 {
1387 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1388 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1389 }
1390 else
1391 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1392
1393 HMVMX_SET_TAGGED_TLB_FLUSHED();
1394 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1395 }
1396
1397 pVCpu->hm.s.TlbShootdown.cPages = 0;
1398 pVCpu->hm.s.fForceTLBFlush = false;
1399
1400 HMVMX_UPDATE_FLUSH_SKIPPED_STAT();
1401
1402 Assert(pVCpu->hm.s.idLastCpu == pCpu->idCpu);
1403 Assert(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes);
1404 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1405 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1406 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1407 ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
1408 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1409 ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1410
1411 /* Update VMCS with the VPID. */
1412 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1413 AssertRC(rc);
1414
1415#undef HMVMX_SET_TAGGED_TLB_FLUSHED
1416}
1417
1418
1419/**
1420 * Flushes the tagged-TLB entries for EPT CPUs as necessary.
1421 *
1423 * @param pVM Pointer to the VM.
1424 * @param pVCpu Pointer to the VMCPU.
1425 *
1426 * @remarks Called with interrupts disabled.
1427 */
1428static void hmR0VmxFlushTaggedTlbEpt(PVM pVM, PVMCPU pVCpu)
1429{
1430 AssertPtr(pVM);
1431 AssertPtr(pVCpu);
1432 AssertMsg(pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with NestedPaging disabled."));
1433 AssertMsg(!pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTaggedTlbEpt cannot be invoked with VPID enabled."));
1434
1435 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
1436 AssertPtr(pCpu);
1437
1438 /*
1439 * Force a TLB flush for the first world-switch if the current CPU differs from the one we ran on last.
1440 * A change in the TLB flush count implies the host CPU is online after a suspend/resume.
1441 */
1442 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1443 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1444 {
1445 pVCpu->hm.s.fForceTLBFlush = true;
1446 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1447 }
1448
1449 /* Check for explicit TLB shootdown flushes. */
1450 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1451 {
1452 pVCpu->hm.s.fForceTLBFlush = true;
1453 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1454 }
1455
1456 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1457 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1458
1459 if (pVCpu->hm.s.fForceTLBFlush)
1460 {
1461 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1462 pVCpu->hm.s.fForceTLBFlush = false;
1463 }
1464 else
1465 {
1466 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1467 * not be executed. See hmQueueInvlPage() where it is commented
1468 * out. Support individual entry flushing someday. */
1469 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1470 {
1471 /* We cannot flush individual entries without VPID support. Flush using EPT. */
1472 STAM_COUNTER_INC(&pVCpu->hm.s.StatTlbShootdown);
1473 hmR0VmxFlushEpt(pVM, pVCpu, pVM->hm.s.vmx.enmFlushEpt);
1474 }
1475 else
1476 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1477 }
1478
1479 pVCpu->hm.s.TlbShootdown.cPages = 0;
1480 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1481}
1482
1483
1484/**
1485 * Flushes the tagged-TLB entries for VPID CPUs as necessary.
1486 *
1488 * @param pVM Pointer to the VM.
1489 * @param pVCpu Pointer to the VMCPU.
1490 *
1491 * @remarks Called with interrupts disabled.
1492 */
1493static void hmR0VmxFlushTaggedTlbVpid(PVM pVM, PVMCPU pVCpu)
1494{
1495 AssertPtr(pVM);
1496 AssertPtr(pVCpu);
1497 AssertMsg(pVM->hm.s.vmx.fVpid, ("hmR0VmxFlushTlbVpid cannot be invoked with VPID disabled."));
1498 AssertMsg(!pVM->hm.s.fNestedPaging, ("hmR0VmxFlushTlbVpid cannot be invoked with NestedPaging enabled"));
1499
1500 PHMGLOBALCPUINFO pCpu = HMR0GetCurrentCpu();
1501
1502 /*
1503 * Force a TLB flush for the first world switch if the current CPU differs from the one we ran on last.
1504 * If the TLB flush count changed, another VM (VCPU rather) has hit the ASID limit while flushing the TLB
1505 * or the host CPU is online after a suspend/resume, so we cannot reuse the current ASID anymore.
1506 */
1507 if ( pVCpu->hm.s.idLastCpu != pCpu->idCpu
1508 || pVCpu->hm.s.cTlbFlushes != pCpu->cTlbFlushes)
1509 {
1510 pVCpu->hm.s.fForceTLBFlush = true;
1511 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlbWorldSwitch);
1512 }
1513
1514 /* Check for explicit TLB shootdown flushes. */
1515 if (VMCPU_FF_TEST_AND_CLEAR(pVCpu, VMCPU_FF_TLB_FLUSH))
1516 {
1517 /*
1518 * If we ever support VPID flush combinations other than ALL or SINGLE-context (see hmR0VmxSetupTaggedTlb())
1519 * we would need to explicitly flush in this case (add an fExplicitFlush = true here and change the
1520         * pCpu->fFlushAsidBeforeUse check below to include fExplicitFlush too) - an obscure corner case.
1521 */
1522 pVCpu->hm.s.fForceTLBFlush = true;
1523 STAM_COUNTER_INC(&pVCpu->hm.s.StatFlushTlb);
1524 }
1525
1526 pVCpu->hm.s.idLastCpu = pCpu->idCpu;
1527 if (pVCpu->hm.s.fForceTLBFlush)
1528 {
1529 ++pCpu->uCurrentAsid;
1530 if (pCpu->uCurrentAsid >= pVM->hm.s.uMaxAsid)
1531 {
1532 pCpu->uCurrentAsid = 1; /* Wraparound to 1; host uses 0 */
1533 pCpu->cTlbFlushes++; /* All VCPUs that run on this host CPU must use a new VPID. */
1534 pCpu->fFlushAsidBeforeUse = true; /* All VCPUs that run on this host CPU must flush their new VPID before use. */
1535 }
1536
1537 pVCpu->hm.s.fForceTLBFlush = false;
1538 pVCpu->hm.s.cTlbFlushes = pCpu->cTlbFlushes;
1539 pVCpu->hm.s.uCurrentAsid = pCpu->uCurrentAsid;
1540 if (pCpu->fFlushAsidBeforeUse)
1541 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1542 }
1543 else
1544 {
1545 AssertMsg(pVCpu->hm.s.uCurrentAsid && pCpu->uCurrentAsid,
1546 ("hm->uCurrentAsid=%lu hm->cTlbFlushes=%lu cpu->uCurrentAsid=%lu cpu->cTlbFlushes=%lu\n",
1547 pVCpu->hm.s.uCurrentAsid, pVCpu->hm.s.cTlbFlushes,
1548 pCpu->uCurrentAsid, pCpu->cTlbFlushes));
1549
1550 /** @todo We never set VMCPU_FF_TLB_SHOOTDOWN anywhere so this path should
1551 * not be executed. See hmQueueInvlPage() where it is commented
1552 * out. Support individual entry flushing someday. */
1553 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_TLB_SHOOTDOWN))
1554 {
1555            /* Flush individual guest entries using VPID if the CPU supports it, otherwise flush the entire VPID context. */
1556 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1557 {
1558 for (uint32_t i = 0; i < pVCpu->hm.s.TlbShootdown.cPages; i++)
1559 hmR0VmxFlushVpid(pVM, pVCpu, VMX_FLUSH_VPID_INDIV_ADDR, pVCpu->hm.s.TlbShootdown.aPages[i]);
1560 }
1561 else
1562 hmR0VmxFlushVpid(pVM, pVCpu, pVM->hm.s.vmx.enmFlushVpid, 0 /* GCPtr */);
1563 }
1564 else
1565 STAM_COUNTER_INC(&pVCpu->hm.s.StatNoFlushTlbWorldSwitch);
1566 }
1567
1568 pVCpu->hm.s.TlbShootdown.cPages = 0;
1569 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TLB_SHOOTDOWN);
1570
1571 AssertMsg(pVCpu->hm.s.cTlbFlushes == pCpu->cTlbFlushes,
1572 ("Flush count mismatch for cpu %d (%u vs %u)\n", pCpu->idCpu, pVCpu->hm.s.cTlbFlushes, pCpu->cTlbFlushes));
1573 AssertMsg(pCpu->uCurrentAsid >= 1 && pCpu->uCurrentAsid < pVM->hm.s.uMaxAsid,
1574 ("cpu%d uCurrentAsid = %u\n", pCpu->idCpu, pCpu->uCurrentAsid));
1575 AssertMsg(pVCpu->hm.s.uCurrentAsid >= 1 && pVCpu->hm.s.uCurrentAsid < pVM->hm.s.uMaxAsid,
1576 ("cpu%d VM uCurrentAsid = %u\n", pCpu->idCpu, pVCpu->hm.s.uCurrentAsid));
1577
1578 int rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, pVCpu->hm.s.uCurrentAsid);
1579 AssertRC(rc);
1580}
1581
1582
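/*
 * Illustrative sketch (disabled, not part of the original sources): the per-host-CPU ASID
 * assignment performed above when a full flush is needed, as a standalone routine. The
 * structure and helper names are hypothetical; the real code operates on PHMGLOBALCPUINFO
 * and PVMCPU directly.
 */
#if 0
typedef struct VMXEXAMPLEHOSTCPU
{
    uint32_t uCurrentAsid;        /* Last ASID handed out on this host CPU. */
    uint32_t cTlbFlushes;         /* Bumped on wraparound; VCPUs compare against it. */
    bool     fFlushAsidBeforeUse; /* Set on wraparound; recycled ASIDs must be flushed before use. */
} VMXEXAMPLEHOSTCPU;

static uint32_t vmxExampleNewAsid(VMXEXAMPLEHOSTCPU *pCpu, uint32_t uMaxAsid)
{
    if (++pCpu->uCurrentAsid >= uMaxAsid)
    {
        pCpu->uCurrentAsid        = 1;    /* Wraparound to 1; the host itself uses ASID 0. */
        pCpu->cTlbFlushes++;              /* Forces every VCPU on this host CPU to resync. */
        pCpu->fFlushAsidBeforeUse = true; /* Recycled ASIDs may still have stale TLB entries. */
    }
    return pCpu->uCurrentAsid;
}
#endif

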
1583/**
1584 * Flushes the guest TLB entries based on CPU capabilities.
1585 *
1586 * @param pVCpu Pointer to the VMCPU.
1587 */
1588DECLINLINE(void) hmR0VmxFlushTaggedTlb(PVMCPU pVCpu)
1589{
1590 PVM pVM = pVCpu->CTX_SUFF(pVM);
1591 switch (pVM->hm.s.vmx.uFlushTaggedTlb)
1592 {
1593 case HMVMX_FLUSH_TAGGED_TLB_EPT_VPID: hmR0VmxFlushTaggedTlbBoth(pVM, pVCpu); break;
1594 case HMVMX_FLUSH_TAGGED_TLB_EPT: hmR0VmxFlushTaggedTlbEpt(pVM, pVCpu); break;
1595 case HMVMX_FLUSH_TAGGED_TLB_VPID: hmR0VmxFlushTaggedTlbVpid(pVM, pVCpu); break;
1596 case HMVMX_FLUSH_TAGGED_TLB_NONE: hmR0VmxFlushTaggedTlbNone(pVM, pVCpu); break;
1597 default:
1598 AssertMsgFailed(("Invalid flush-tag function identifier\n"));
1599 break;
1600 }
1601}
1602
1603
1604/**
1605 * Sets up the appropriate tagged TLB-flush level and handler for flushing guest
1606 * TLB entries from the host TLB before VM-entry.
1607 *
1608 * @returns VBox status code.
1609 * @param pVM Pointer to the VM.
1610 */
1611static int hmR0VmxSetupTaggedTlb(PVM pVM)
1612{
1613 /*
1614 * Determine optimal flush type for Nested Paging.
1615     * We cannot ignore EPT if no suitable flush-types are supported by the CPU, as we've already set up unrestricted
1616 * guest execution (see hmR3InitFinalizeR0()).
1617 */
1618 if (pVM->hm.s.fNestedPaging)
1619 {
1620 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)
1621 {
1622 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
1623 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_SINGLE_CONTEXT;
1624 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
1625 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_ALL_CONTEXTS;
1626 else
1627 {
1628 /* Shouldn't happen. EPT is supported but no suitable flush-types supported. */
1629 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1630 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1631 }
1632
1633 /* Make sure the write-back cacheable memory type for EPT is supported. */
1634 if (!(pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB))
1635 {
1636 LogRel(("hmR0VmxSetupTaggedTlb: Unsupported EPTP memory type %#x.\n", pVM->hm.s.vmx.msr.vmx_ept_vpid_caps));
1637 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1638 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1639 }
1640 }
1641 else
1642 {
1643 /* Shouldn't happen. EPT is supported but INVEPT instruction is not supported. */
1644 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NOT_SUPPORTED;
1645 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1646 }
1647 }
1648
1649 /*
1650 * Determine optimal flush type for VPID.
1651 */
1652 if (pVM->hm.s.vmx.fVpid)
1653 {
1654 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID)
1655 {
1656 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT)
1657 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_SINGLE_CONTEXT;
1658 else if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS)
1659 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_ALL_CONTEXTS;
1660 else
1661 {
1662 /* Neither SINGLE nor ALL-context flush types for VPID is supported by the CPU. Ignore VPID capability. */
1663 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR)
1664 LogRel(("hmR0VmxSetupTaggedTlb: Only INDIV_ADDR supported. Ignoring VPID.\n"));
1665 if (pVM->hm.s.vmx.msr.vmx_ept_vpid_caps & MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS)
1666 LogRel(("hmR0VmxSetupTaggedTlb: Only SINGLE_CONTEXT_RETAIN_GLOBALS supported. Ignoring VPID.\n"));
1667 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1668 pVM->hm.s.vmx.fVpid = false;
1669 }
1670 }
1671 else
1672 {
1673 /* Shouldn't happen. VPID is supported but INVVPID is not supported by the CPU. Ignore VPID capability. */
1674            Log4(("hmR0VmxSetupTaggedTlb: VPID supported without INVVPID support. Ignoring VPID.\n"));
1675 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NOT_SUPPORTED;
1676 pVM->hm.s.vmx.fVpid = false;
1677 }
1678 }
1679
1680 /*
1681 * Setup the handler for flushing tagged-TLBs.
1682 */
1683 if (pVM->hm.s.fNestedPaging && pVM->hm.s.vmx.fVpid)
1684 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT_VPID;
1685 else if (pVM->hm.s.fNestedPaging)
1686 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_EPT;
1687 else if (pVM->hm.s.vmx.fVpid)
1688 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_VPID;
1689 else
1690 pVM->hm.s.vmx.uFlushTaggedTlb = HMVMX_FLUSH_TAGGED_TLB_NONE;
1691 return VINF_SUCCESS;
1692}
1693
1694
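/*
 * Illustrative sketch (disabled, not part of the original sources): the EPT flush-type
 * selection above folded into one helper. Returns one of the VMX_FLUSH_EPT_* values; the
 * helper name and the plain 'int' return type are hypothetical simplifications, and the
 * real code additionally validates the write-back memory-type capability bit.
 */
#if 0
static int vmxExamplePickEptFlushType(uint64_t fEptVpidCaps)
{
    if (!(fEptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT))
        return VMX_FLUSH_EPT_NOT_SUPPORTED;     /* INVEPT instruction unavailable. */
    if (fEptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT)
        return VMX_FLUSH_EPT_SINGLE_CONTEXT;    /* Preferred: flushes only this EPTP. */
    if (fEptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS)
        return VMX_FLUSH_EPT_ALL_CONTEXTS;      /* Fallback: flushes all EPT contexts. */
    return VMX_FLUSH_EPT_NOT_SUPPORTED;         /* INVEPT without a usable flush type. */
}
#endif

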
1695/**
1696 * Sets up pin-based VM-execution controls in the VMCS.
1697 *
1698 * @returns VBox status code.
1699 * @param pVM Pointer to the VM.
1700 * @param pVCpu Pointer to the VMCPU.
1701 */
1702static int hmR0VmxSetupPinCtls(PVM pVM, PVMCPU pVCpu)
1703{
1704 AssertPtr(pVM);
1705 AssertPtr(pVCpu);
1706
1707 uint32_t val = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0; /* Bits set here must always be set. */
1708 uint32_t zap = pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1; /* Bits cleared here must always be cleared. */
1709
1710    val |= VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT           /* External interrupts cause a VM-exit. */
1711           | VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT;            /* Non-maskable interrupts (NMIs) cause a VM-exit. */
1712 Assert(!(val & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI));
1713
1714 /* Enable the VMX preemption timer. */
1715 if (pVM->hm.s.vmx.fUsePreemptTimer)
1716 {
1717 Assert(pVM->hm.s.vmx.msr.vmx_pin_ctls.n.allowed1 & VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER);
1718 val |= VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER;
1719 }
1720
1721 if ((val & zap) != val)
1722 {
1723 LogRel(("hmR0VmxSetupPinCtls: invalid pin-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1724 pVM->hm.s.vmx.msr.vmx_pin_ctls.n.disallowed0, val, zap));
1725 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PIN_EXEC;
1726 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1727 }
1728
1729 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, val);
1730 AssertRCReturn(rc, rc);
1731
1732 /* Update VCPU with the currently set pin-based VM-execution controls. */
1733 pVCpu->hm.s.vmx.u32PinCtls = val;
1734 return rc;
1735}
1736
1737
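/*
 * Illustrative sketch (disabled, not part of the original sources): the "val & zap"
 * consistency check used by all the control setup routines in this file. 'fDisallowed0'
 * holds bits the CPU insists must be set, 'fAllowed1' holds bits the CPU permits to be set;
 * a desired value is feasible only if it survives masking with the allowed-1 bits. The
 * helper name is hypothetical.
 */
#if 0
static bool vmxExampleIsCtlsComboValid(uint32_t fDisallowed0, uint32_t fAllowed1, uint32_t fDesired)
{
    uint32_t const val = fDisallowed0 | fDesired; /* Must-be-one bits, plus what we want. */
    uint32_t const zap = fAllowed1;               /* Anything outside this must stay zero. */
    return (val & zap) == val;                    /* Fails if we want a bit the CPU forbids. */
}
#endif

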
1738/**
1739 * Sets up processor-based VM-execution controls in the VMCS.
1740 *
1741 * @returns VBox status code.
1742 * @param pVM Pointer to the VM.
1743 * @param   pVCpu       Pointer to the VMCPU.
1744 */
1745static int hmR0VmxSetupProcCtls(PVM pVM, PVMCPU pVCpu)
1746{
1747 AssertPtr(pVM);
1748 AssertPtr(pVCpu);
1749
1750 int rc = VERR_INTERNAL_ERROR_5;
1751 uint32_t val = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0; /* Bits set here must be set in the VMCS. */
1752 uint32_t zap = pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1753
1754 val |= VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT /* HLT causes a VM-exit. */
1755 | VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING /* Use TSC-offsetting. */
1756 | VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT /* MOV DRx causes a VM-exit. */
1757 | VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT /* All IO instructions cause a VM-exit. */
1758 | VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT /* RDPMC causes a VM-exit. */
1759 | VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT /* MONITOR causes a VM-exit. */
1760 | VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT; /* MWAIT causes a VM-exit. */
1761
1762    /* We toggle VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT later; verify the CPU doesn't force it to be always set or always clear. */
1763 if ( !(pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT)
1764 || (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0 & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT))
1765 {
1766 LogRel(("hmR0VmxSetupProcCtls: unsupported VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT combo!"));
1767 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_MOV_DRX_EXIT;
1768 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1769 }
1770
1771 /* Without Nested Paging, INVLPG (also affects INVPCID) and MOV CR3 instructions should cause VM-exits. */
1772 if (!pVM->hm.s.fNestedPaging)
1773 {
1774 Assert(!pVM->hm.s.vmx.fUnrestrictedGuest); /* Paranoia. */
1775 val |= VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT
1776 | VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
1777 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
1778 }
1779
1780 /* Use TPR shadowing if supported by the CPU. */
1781 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
1782 {
1783 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
1784 Assert(!(pVCpu->hm.s.vmx.HCPhysVirtApic & 0xfff)); /* Bits 11:0 MBZ. */
1785 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, 0);
1786 rc |= VMXWriteVmcs64(VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL, pVCpu->hm.s.vmx.HCPhysVirtApic);
1787 AssertRCReturn(rc, rc);
1788
1789 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW; /* CR8 reads from the Virtual-APIC page. */
1790        /* CR8 writes cause a VM-exit based on the TPR threshold. */
1791 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT));
1792 Assert(!(val & VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT));
1793 }
1794 else
1795 {
1796        val |= VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT    /* CR8 reads cause a VM-exit. */
1797               | VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT;  /* CR8 writes cause a VM-exit. */
1798 }
1799
1800 /* Use MSR-bitmaps if supported by the CPU. */
1801 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1802 {
1803 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS;
1804
1805 Assert(pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1806 Assert(!(pVCpu->hm.s.vmx.HCPhysMsrBitmap & 0xfff)); /* Bits 11:0 MBZ. */
1807 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_MSR_BITMAP_FULL, pVCpu->hm.s.vmx.HCPhysMsrBitmap);
1808 AssertRCReturn(rc, rc);
1809
1810 /*
1811 * The guest can access the following MSRs (read, write) without causing VM-exits; they are loaded/stored
1812 * automatically (either as part of the MSR-load/store areas or dedicated fields in the VMCS).
1813 */
1814 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_CS, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1815 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_ESP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1816 hmR0VmxSetMsrPermission(pVCpu, MSR_IA32_SYSENTER_EIP, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1817 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_LSTAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1818 hmR0VmxSetMsrPermission(pVCpu, MSR_K6_STAR, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1819 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_SF_MASK, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1820 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_KERNEL_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1821 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_GS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1822 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_FS_BASE, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1823 }
1824
1825 /* Use the secondary processor-based VM-execution controls if supported by the CPU. */
1826 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL)
1827 val |= VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;
1828
1829 if ((val & zap) != val)
1830 {
1831 LogRel(("hmR0VmxSetupProcCtls: invalid processor-based VM-execution controls combo! cpu=%#RX64 val=%#RX64 zap=%#RX64\n",
1832 pVM->hm.s.vmx.msr.vmx_proc_ctls.n.disallowed0, val, zap));
1833 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_PROC_EXEC;
1834 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1835 }
1836
1837 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, val);
1838 AssertRCReturn(rc, rc);
1839
1840 /* Update VCPU with the currently set processor-based VM-execution controls. */
1841 pVCpu->hm.s.vmx.u32ProcCtls = val;
1842
1843 /*
1844 * Secondary processor-based VM-execution controls.
1845 */
1846 if (RT_LIKELY(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL))
1847 {
1848 val = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0; /* Bits set here must be set in the VMCS. */
1849 zap = pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
1850
1851 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT)
1852 val |= VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT; /* WBINVD causes a VM-exit. */
1853
1854 if (pVM->hm.s.fNestedPaging)
1855 val |= VMX_VMCS_CTRL_PROC_EXEC2_EPT; /* Enable EPT. */
1856 else
1857 {
1858 /*
1859 * Without Nested Paging, INVPCID should cause a VM-exit. Enabling this bit causes the CPU to refer to
1860 * VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT when INVPCID is executed by the guest.
1861 * See Intel spec. 25.4 "Changes to instruction behaviour in VMX non-root operation".
1862 */
1863 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_INVPCID)
1864 val |= VMX_VMCS_CTRL_PROC_EXEC2_INVPCID;
1865 }
1866
1867 if (pVM->hm.s.vmx.fVpid)
1868 val |= VMX_VMCS_CTRL_PROC_EXEC2_VPID; /* Enable VPID. */
1869
1870 if (pVM->hm.s.vmx.fUnrestrictedGuest)
1871 val |= VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST; /* Enable Unrestricted Execution. */
1872
1873 /* Enable Virtual-APIC page accesses if supported by the CPU. This is essentially where the TPR shadow resides. */
1874 /** @todo VIRT_X2APIC support, it's mutually exclusive with this. So must be
1875 * done dynamically. */
1876 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC)
1877 {
1878 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
1879 Assert(!(pVM->hm.s.vmx.HCPhysApicAccess & 0xfff)); /* Bits 11:0 MBZ. */
1880 val |= VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC; /* Virtualize APIC accesses. */
1881 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL, pVM->hm.s.vmx.HCPhysApicAccess);
1882 AssertRCReturn(rc, rc);
1883 }
1884
1885 if (pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
1886 {
1887 val |= VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP; /* Enable RDTSCP support. */
1888 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS)
1889 hmR0VmxSetMsrPermission(pVCpu, MSR_K8_TSC_AUX, VMXMSREXIT_PASSTHRU_READ, VMXMSREXIT_PASSTHRU_WRITE);
1890 }
1891
1892 if ((val & zap) != val)
1893 {
1894 LogRel(("hmR0VmxSetupProcCtls: invalid secondary processor-based VM-execution controls combo! "
1895 "cpu=%#RX64 val=%#RX64 zap=%#RX64\n", pVM->hm.s.vmx.msr.vmx_proc_ctls2.n.disallowed0, val, zap));
1896 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1897 }
1898
1899 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, val);
1900 AssertRCReturn(rc, rc);
1901
1902 /* Update VCPU with the currently set secondary processor-based VM-execution controls. */
1903 pVCpu->hm.s.vmx.u32ProcCtls2 = val;
1904 }
1905 else if (RT_UNLIKELY(pVM->hm.s.vmx.fUnrestrictedGuest))
1906 {
1907        LogRel(("hmR0VmxSetupProcCtls: Unrestricted Guest enabled but secondary processor-based VM-execution controls are not "
1908                "available\n"));
1909 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
1910 }
1911
1912 return VINF_SUCCESS;
1913}
1914
1915
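/*
 * Illustrative sketch (disabled, not part of the original sources): what the
 * hmR0VmxSetMsrPermission calls above conceptually do with the 4 KB MSR-bitmap page. Layout
 * per the Intel spec: read-low bitmap at byte 0, read-high at 1024, write-low at 2048,
 * write-high at 3072, where "high" covers MSRs 0xC0000000..0xC0001FFF. Clearing a bit lets
 * the corresponding access run without a VM-exit. This is a simplified, hypothetical
 * stand-in for the real helper, which also handles the "cause an exit" case.
 */
#if 0
static void vmxExamplePassthruMsr(uint8_t *pbMsrBitmap, uint32_t idMsr)
{
    uint32_t offBase = 0;
    if (idMsr >= UINT32_C(0xc0000000) && idMsr <= UINT32_C(0xc0001fff))
    {
        offBase = 1024;                /* High-MSR bitmaps follow the low-MSR ones. */
        idMsr  &= UINT32_C(0x1fff);
    }
    /* Clear the read bit (offset 0) and the write bit (offset 2048): no VM-exit either way. */
    pbMsrBitmap[offBase + (idMsr >> 3)]        &= ~(uint8_t)(1 << (idMsr & 7));
    pbMsrBitmap[offBase + 2048 + (idMsr >> 3)] &= ~(uint8_t)(1 << (idMsr & 7));
}
#endif

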
1916/**
1917 * Sets up miscellaneous (everything other than Pin & Processor-based
1918 * VM-execution) control fields in the VMCS.
1919 *
1920 * @returns VBox status code.
1921 * @param pVM Pointer to the VM.
1922 * @param pVCpu Pointer to the VMCPU.
1923 */
1924static int hmR0VmxSetupMiscCtls(PVM pVM, PVMCPU pVCpu)
1925{
1926 AssertPtr(pVM);
1927 AssertPtr(pVCpu);
1928
1929 int rc = VERR_GENERAL_FAILURE;
1930
1931 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
1932#if 0
1933 /* All CR3 accesses cause VM-exits. Later we optimize CR3 accesses (see hmR0VmxLoadGuestControlRegs())*/
1934 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, 0); AssertRCReturn(rc, rc);
1935 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, 0); AssertRCReturn(rc, rc);
1936
1937 /*
1938 * Set MASK & MATCH to 0. VMX checks if GuestPFErrCode & MASK == MATCH. If equal (in our case it always is)
1939 * and if the X86_XCPT_PF bit in the exception bitmap is set it causes a VM-exit, if clear doesn't cause an exit.
1940 * We thus use the exception bitmap to control it rather than use both.
1941 */
1942 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, 0); AssertRCReturn(rc, rc);
1943 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, 0); AssertRCReturn(rc, rc);
1944
1945 /** @todo Explore possibility of using IO-bitmaps. */
1946 /* All IO & IOIO instructions cause VM-exits. */
1947 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_A_FULL, 0); AssertRCReturn(rc, rc);
1948 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_IO_BITMAP_B_FULL, 0); AssertRCReturn(rc, rc);
1949
1950 /* Initialize the MSR-bitmap area. */
1951 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
1952 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, 0); AssertRCReturn(rc, rc);
1953 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, 0); AssertRCReturn(rc, rc);
1954#endif
1955
1956#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
1957 /* Setup MSR autoloading/storing. */
1958 Assert(pVCpu->hm.s.vmx.HCPhysGuestMsr);
1959 Assert(!(pVCpu->hm.s.vmx.HCPhysGuestMsr & 0xf)); /* Lower 4 bits MBZ. */
1960 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1961 AssertRCReturn(rc, rc);
1962 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL, pVCpu->hm.s.vmx.HCPhysGuestMsr);
1963 AssertRCReturn(rc, rc);
1964
1965 Assert(pVCpu->hm.s.vmx.HCPhysHostMsr);
1966 Assert(!(pVCpu->hm.s.vmx.HCPhysHostMsr & 0xf)); /* Lower 4 bits MBZ. */
1967 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL, pVCpu->hm.s.vmx.HCPhysHostMsr);
1968 AssertRCReturn(rc, rc);
1969#endif
1970
1971 /* Set VMCS link pointer. Reserved for future use, must be -1. Intel spec. 24.4 "Guest-State Area". */
1972 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, UINT64_C(0xffffffffffffffff));
1973 AssertRCReturn(rc, rc);
1974
1975 /* All fields are zero-initialized during allocation; but don't remove the commented block below. */
1976#if 0
1977 /* Setup debug controls */
1978 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, 0); /** @todo We don't support IA32_DEBUGCTL MSR. Should we? */
1979 AssertRCReturn(rc, rc);
1980 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, 0);
1981 AssertRCReturn(rc, rc);
1982#endif
1983
1984 return rc;
1985}
1986
1987
1988/**
1989 * Sets up the initial exception bitmap in the VMCS based on static conditions
1990 * (i.e. conditions that cannot ever change at runtime).
1991 *
1992 * @returns VBox status code.
1993 * @param pVM Pointer to the VM.
1994 * @param pVCpu Pointer to the VMCPU.
1995 */
1996static int hmR0VmxInitXcptBitmap(PVM pVM, PVMCPU pVCpu)
1997{
1998 AssertPtr(pVM);
1999 AssertPtr(pVCpu);
2000
2001 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
2002
2003 uint32_t u32XcptBitmap = 0;
2004
2005 /* Without Nested Paging, #PF must cause a VM-exit so we can sync our shadow page tables. */
2006 if (!pVM->hm.s.fNestedPaging)
2007 u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
2008
2009 pVCpu->hm.s.vmx.u32XcptBitmap = u32XcptBitmap;
2010 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, u32XcptBitmap);
2011 AssertRCReturn(rc, rc);
2012 return rc;
2013}
2014
2015
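/*
 * Illustrative sketch (disabled, not part of the original sources): the exception bitmap is
 * one bit per vector 0..31; a set bit makes that exception cause a VM-exit, e.g. intercepting
 * #PF (vector 14) so shadow page tables can be synced, exactly as done above. The helper
 * name is hypothetical.
 */
#if 0
static uint32_t vmxExampleXcptBitmap(bool fNestedPaging)
{
    uint32_t fXcpts = 0;
    if (!fNestedPaging)
        fXcpts |= RT_BIT(X86_XCPT_PF);  /* #PF must exit so shadow page tables can be synced. */
    return fXcpts;
}
#endif

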
2016/**
2017 * Sets up the initial guest-state mask. The guest-state mask is consulted
2018 * before reading guest-state fields from the VMCS, as VMREADs can be expensive,
2019 * particularly in the nested virtualization case where each one causes a VM-exit.
2020 *
2021 * @param pVCpu Pointer to the VMCPU.
2022 */
2023static int hmR0VmxInitUpdatedGuestStateMask(PVMCPU pVCpu)
2024{
2025 /* Initially the guest-state is up-to-date as there is nothing in the VMCS. */
2026 pVCpu->hm.s.vmx.fUpdatedGuestState = HMVMX_UPDATED_GUEST_ALL;
2027 return VINF_SUCCESS;
2028}
2029
2030
2031/**
2032 * Does per-VM VT-x initialization.
2033 *
2034 * @returns VBox status code.
2035 * @param pVM Pointer to the VM.
2036 */
2037VMMR0DECL(int) VMXR0InitVM(PVM pVM)
2038{
2039 LogFlowFunc(("pVM=%p\n", pVM));
2040
2041 int rc = hmR0VmxStructsAlloc(pVM);
2042 if (RT_FAILURE(rc))
2043 {
2044 LogRel(("VMXR0InitVM: hmR0VmxStructsAlloc failed! rc=%Rrc\n", rc));
2045 return rc;
2046 }
2047
2048 return VINF_SUCCESS;
2049}
2050
2051
2052/**
2053 * Does per-VM VT-x termination.
2054 *
2055 * @returns VBox status code.
2056 * @param pVM Pointer to the VM.
2057 */
2058VMMR0DECL(int) VMXR0TermVM(PVM pVM)
2059{
2060 LogFlowFunc(("pVM=%p\n", pVM));
2061
2062#ifdef VBOX_WITH_CRASHDUMP_MAGIC
2063 if (pVM->hm.s.vmx.hMemObjScratch != NIL_RTR0MEMOBJ)
2064 ASMMemZero32(pVM->hm.s.vmx.pvScratch, PAGE_SIZE);
2065#endif
2066 hmR0VmxStructsFree(pVM);
2067 return VINF_SUCCESS;
2068}
2069
2070
2071/**
2072 * Sets up the VM for execution under VT-x.
2073 * This function is only called once per-VM during initialization.
2074 *
2075 * @returns VBox status code.
2076 * @param pVM Pointer to the VM.
2077 */
2078VMMR0DECL(int) VMXR0SetupVM(PVM pVM)
2079{
2080 AssertPtrReturn(pVM, VERR_INVALID_PARAMETER);
2081 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
2082
2083 LogFlowFunc(("pVM=%p\n", pVM));
2084
2085 /*
2086 * Without UnrestrictedGuest, pRealModeTSS and pNonPagingModeEPTPageTable *must* always be allocated.
2087 * We no longer support the highly unlikely case of UnrestrictedGuest without pRealModeTSS. See hmR3InitFinalizeR0().
2088 */
2089 /* -XXX- change hmR3InitFinalizeR0Intel() to fail if pRealModeTSS alloc fails. */
2090 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
2091 && ( !pVM->hm.s.vmx.pNonPagingModeEPTPageTable
2092 || !pVM->hm.s.vmx.pRealModeTSS))
2093 {
2094 LogRel(("VMXR0SetupVM: invalid real-on-v86 state.\n"));
2095 return VERR_INTERNAL_ERROR;
2096 }
2097
2098#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2099 /*
2100 * This is for the darwin 32-bit/PAE kernels trying to execute 64-bit guests. We don't bother with
2101 * the 32<->64 switcher in this case. This is a rare, legacy use-case with barely any test coverage.
2102 */
2103 if ( pVM->hm.s.fAllow64BitGuests
2104 && !HMVMX_IS_64BIT_HOST_MODE())
2105 {
2106 LogRel(("VMXR0SetupVM: Unsupported guest and host paging mode combination.\n"));
2107 return VERR_PGM_UNSUPPORTED_HOST_PAGING_MODE;
2108 }
2109#endif
2110
2111 /* Initialize these always, see hmR3InitFinalizeR0().*/
2112 pVM->hm.s.vmx.enmFlushEpt = VMX_FLUSH_EPT_NONE;
2113 pVM->hm.s.vmx.enmFlushVpid = VMX_FLUSH_VPID_NONE;
2114
2115 /* Setup the tagged-TLB flush handlers. */
2116 int rc = hmR0VmxSetupTaggedTlb(pVM);
2117 if (RT_FAILURE(rc))
2118 {
2119 LogRel(("VMXR0SetupVM: hmR0VmxSetupTaggedTlb failed! rc=%Rrc\n", rc));
2120 return rc;
2121 }
2122
2123 for (VMCPUID i = 0; i < pVM->cCpus; i++)
2124 {
2125 PVMCPU pVCpu = &pVM->aCpus[i];
2126 AssertPtr(pVCpu);
2127 AssertPtr(pVCpu->hm.s.vmx.pvVmcs);
2128
2129 /* Log the VCPU pointers, useful for debugging SMP VMs. */
2130 Log4(("VMXR0SetupVM: pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
2131
2132 /* Set revision dword at the beginning of the VMCS structure. */
2133 *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(pVM->hm.s.vmx.msr.vmx_basic_info);
2134
2135 /* Initialize our VMCS region in memory, set the VMCS launch state to "clear". */
2136 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2137 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2138 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2139
2140 /* Load this VMCS as the current VMCS. */
2141 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2142 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXActivateVmcs failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2143 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2144
2145 rc = hmR0VmxSetupPinCtls(pVM, pVCpu);
2146 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupPinCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2147 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2148
2149 rc = hmR0VmxSetupProcCtls(pVM, pVCpu);
2150 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupProcCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2151 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2152
2153 rc = hmR0VmxSetupMiscCtls(pVM, pVCpu);
2154 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxSetupMiscCtls failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2155 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2156
2157 rc = hmR0VmxInitXcptBitmap(pVM, pVCpu);
2158 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitXcptBitmap failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2159 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2160
2161 rc = hmR0VmxInitUpdatedGuestStateMask(pVCpu);
2162 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitUpdatedGuestStateMask failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2163 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2164
2165#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2166 rc = hmR0VmxInitVmcsReadCache(pVM, pVCpu);
2167 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: hmR0VmxInitVmcsReadCache failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2168 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2169#endif
2170
2171 /* Re-sync the CPU's internal data into our VMCS memory region & reset the launch state to "clear". */
2172 rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
2173 AssertLogRelMsgRCReturnStmt(rc, ("VMXR0SetupVM: VMXClearVmcs(2) failed! rc=%Rrc (pVM=%p)\n", rc, pVM),
2174 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc), rc);
2175
2176 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
2177
2178 hmR0VmxUpdateErrorRecord(pVM, pVCpu, rc);
2179 }
2180
2181 return VINF_SUCCESS;
2182}
2183
2184
2185/**
2186 * Saves the host control registers (CR0, CR3, CR4) into the host-state area in
2187 * the VMCS.
2188 *
2189 * @returns VBox status code.
2190 * @param pVM Pointer to the VM.
2191 * @param pVCpu Pointer to the VMCPU.
2192 */
2193DECLINLINE(int) hmR0VmxSaveHostControlRegs(PVM pVM, PVMCPU pVCpu)
2194{
2195 RTCCUINTREG uReg = ASMGetCR0();
2196 int rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR0, uReg);
2197 AssertRCReturn(rc, rc);
2198
2199#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2200 /* For the darwin 32-bit hybrid kernel, we need the 64-bit CR3 as it uses 64-bit paging. */
2201 if (HMVMX_IS_64BIT_HOST_MODE())
2202 {
2203 uint64_t uRegCR3 = HMR0Get64bitCR3();
2204 rc = VMXWriteVmcs64(VMX_VMCS_HOST_CR3, uRegCR3);
2205 }
2206 else
2207#endif
2208 {
2209 uReg = ASMGetCR3();
2210 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR3, uReg);
2211 }
2212 AssertRCReturn(rc, rc);
2213
2214 uReg = ASMGetCR4();
2215 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_CR4, uReg);
2216 AssertRCReturn(rc, rc);
2217 return rc;
2218}
2219
2220
2221/**
2222 * Saves the host segment registers and GDTR, IDTR, (TR, GS and FS bases) into
2223 * the host-state area in the VMCS.
2224 *
2225 * @returns VBox status code.
2226 * @param pVM Pointer to the VM.
2227 * @param pVCpu Pointer to the VMCPU.
2228 */
2229DECLINLINE(int) hmR0VmxSaveHostSegmentRegs(PVM pVM, PVMCPU pVCpu)
2230{
2231 int rc = VERR_INTERNAL_ERROR_5;
2232 RTSEL uSelDS = 0;
2233 RTSEL uSelES = 0;
2234 RTSEL uSelFS = 0;
2235 RTSEL uSelGS = 0;
2236 RTSEL uSelTR = 0;
2237
2238 /*
2239 * Host DS, ES, FS and GS segment registers.
2240 */
2241#if HC_ARCH_BITS == 64
2242 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
2243 uSelDS = ASMGetDS();
2244 uSelES = ASMGetES();
2245 uSelFS = ASMGetFS();
2246 uSelGS = ASMGetGS();
2247#endif
2248
2249 /*
2250 * Host CS and SS segment registers.
2251 */
2252 RTSEL uSelCS;
2253 RTSEL uSelSS;
2254#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2255 if (HMVMX_IS_64BIT_HOST_MODE())
2256 {
2257 uSelCS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelCS;
2258 uSelSS = (RTSEL)(uintptr_t)&SUPR0Abs64bitKernelSS;
2259 }
2260 else
2261 {
2262 /* Seems darwin uses the LDT (TI flag is set) in the CS & SS selectors which VT-x doesn't like. */
2263 uSelCS = (RTSEL)(uintptr_t)&SUPR0AbsKernelCS;
2264 uSelSS = (RTSEL)(uintptr_t)&SUPR0AbsKernelSS;
2265 }
2266#else
2267 uSelCS = ASMGetCS();
2268 uSelSS = ASMGetSS();
2269#endif
2270
2271 /*
2272 * Host TR segment register.
2273 */
2274 uSelTR = ASMGetTR();
2275
2276#if HC_ARCH_BITS == 64
2277 /*
2278     * Determine if the host segment registers are suitable for VT-x; otherwise load them with zero to satisfy the VM-entry
2279     * checks and restore the originals before we get preempted. See Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers".
2280 */
2281 if (uSelDS & (X86_SEL_RPL | X86_SEL_LDT))
2282 {
2283 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_DS;
2284 pVCpu->hm.s.vmx.RestoreHost.uHostSelDS = uSelDS;
2285 uSelDS = 0;
2286 }
2287 if (uSelES & (X86_SEL_RPL | X86_SEL_LDT))
2288 {
2289 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_ES;
2290 pVCpu->hm.s.vmx.RestoreHost.uHostSelES = uSelES;
2291 uSelES = 0;
2292 }
2293 if (uSelFS & (X86_SEL_RPL | X86_SEL_LDT))
2294 {
2295 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_FS;
2296 pVCpu->hm.s.vmx.RestoreHost.uHostSelFS = uSelFS;
2297 uSelFS = 0;
2298 }
2299 if (uSelGS & (X86_SEL_RPL | X86_SEL_LDT))
2300 {
2301 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_GS;
2302 pVCpu->hm.s.vmx.RestoreHost.uHostSelGS = uSelGS;
2303 uSelGS = 0;
2304 }
2305#endif
2306
2307 /* Verification based on Intel spec. 26.2.3 "Checks on Host Segment and Descriptor-Table Registers" */
2308 Assert(!(uSelCS & X86_SEL_RPL)); Assert(!(uSelCS & X86_SEL_LDT));
2309 Assert(!(uSelSS & X86_SEL_RPL)); Assert(!(uSelSS & X86_SEL_LDT));
2310 Assert(!(uSelDS & X86_SEL_RPL)); Assert(!(uSelDS & X86_SEL_LDT));
2311 Assert(!(uSelES & X86_SEL_RPL)); Assert(!(uSelES & X86_SEL_LDT));
2312 Assert(!(uSelFS & X86_SEL_RPL)); Assert(!(uSelFS & X86_SEL_LDT));
2313 Assert(!(uSelGS & X86_SEL_RPL)); Assert(!(uSelGS & X86_SEL_LDT));
2314 Assert(!(uSelTR & X86_SEL_RPL)); Assert(!(uSelTR & X86_SEL_LDT));
2315 Assert(uSelCS);
2316 Assert(uSelTR);
2317
2318 /* Assertion is right but we would not have updated u32ExitCtls yet. */
2319#if 0
2320 if (!(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE))
2321 Assert(uSelSS != 0);
2322#endif
2323
2324 /* Write these host selector fields into the host-state area in the VMCS. */
2325 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_CS, uSelCS); AssertRCReturn(rc, rc);
2326 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_SS, uSelSS); AssertRCReturn(rc, rc);
2327#if HC_ARCH_BITS == 64
2328 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_DS, uSelDS); AssertRCReturn(rc, rc);
2329 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_ES, uSelES); AssertRCReturn(rc, rc);
2330 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_FS, uSelFS); AssertRCReturn(rc, rc);
2331 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_GS, uSelGS); AssertRCReturn(rc, rc);
2332#endif
2333 rc = VMXWriteVmcs32(VMX_VMCS16_HOST_FIELD_TR, uSelTR); AssertRCReturn(rc, rc);
2334
2335 /*
2336 * Host GDTR and IDTR.
2337 */
2338 RTGDTR Gdtr;
2339 RT_ZERO(Gdtr);
2340#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2341 if (HMVMX_IS_64BIT_HOST_MODE())
2342 {
2343 X86XDTR64 Gdtr64;
2344 X86XDTR64 Idtr64;
2345 HMR0Get64bitGdtrAndIdtr(&Gdtr64, &Idtr64);
2346 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GDTR_BASE, Gdtr64.uAddr); AssertRCReturn(rc, rc);
2347 rc = VMXWriteVmcs64(VMX_VMCS_HOST_IDTR_BASE, Idtr64.uAddr); AssertRCReturn(rc, rc);
2348
2349 Gdtr.cbGdt = Gdtr64.cb;
2350 Gdtr.pGdt = (uintptr_t)Gdtr64.uAddr;
2351 }
2352 else
2353#endif
2354 {
2355 RTIDTR Idtr;
2356 ASMGetGDTR(&Gdtr);
2357 ASMGetIDTR(&Idtr);
2358 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, Gdtr.pGdt); AssertRCReturn(rc, rc);
2359 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, Idtr.pIdt); AssertRCReturn(rc, rc);
2360
2361#if HC_ARCH_BITS == 64
2362 /*
2363         * Determine if we need to manually restore the GDTR and IDTR limits, as VT-x zaps them to the
2364 * maximum limit (0xffff) on every VM-exit.
2365 */
2366 if (Gdtr.cbGdt != 0xffff)
2367 {
2368 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_GDTR;
2369 AssertCompile(sizeof(Gdtr) == sizeof(X86XDTR64));
2370 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
2371 }
2372
2373 /*
2374         * The IDT limit is practically at most 0xfff (256 vectors of 16 bytes each), so if the host has the limit as 0xfff,
2375         * VT-x bloating it to 0xffff is harmless as the extra range cannot be reached anyway. See Intel spec. 6.14.1 "64-Bit Mode IDT" and
2376 * Intel spec. 6.2 "Exception and Interrupt Vectors".
2377 */
2378 if (Idtr.cbIdt < 0x0fff)
2379 {
2380 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_IDTR;
2381 AssertCompile(sizeof(Idtr) == sizeof(X86XDTR64));
2382 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostIdtr, &Idtr, sizeof(X86XDTR64));
2383 }
2384#endif
2385 }
2386
2387 /*
2388     * Host TR base. Verify that the TR selector doesn't point past the GDT. Masking off the TI and RPL bits
2389     * leaves the selector index scaled by 8, i.e. the GDT byte offset. TI is always 0 and RPL should be too in most cases.
2390 */
2391 if ((uSelTR & X86_SEL_MASK) > Gdtr.cbGdt)
2392 {
2393 AssertMsgFailed(("hmR0VmxSaveHostSegmentRegs: TR selector exceeds limit. TR=%RTsel cbGdt=%#x\n", uSelTR, Gdtr.cbGdt));
2394 return VERR_VMX_INVALID_HOST_STATE;
2395 }
2396
2397 PCX86DESCHC pDesc = (PCX86DESCHC)(Gdtr.pGdt + (uSelTR & X86_SEL_MASK));
2398#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2399 if (HMVMX_IS_64BIT_HOST_MODE())
2400 {
2401 /* We need the 64-bit TR base for hybrid darwin. */
2402 uint64_t u64TRBase = X86DESC64_BASE((PX86DESC64)pDesc);
2403 rc = VMXWriteVmcs64(VMX_VMCS_HOST_TR_BASE, u64TRBase);
2404 }
2405 else
2406#endif
2407 {
2408 uintptr_t uTRBase;
2409#if HC_ARCH_BITS == 64
2410 uTRBase = X86DESC64_BASE(pDesc);
2411
2412 /*
2413 * VT-x unconditionally restores the TR limit to 0x67 and type to 11 (32-bit busy TSS) on all VM-exits.
2414 * The type is the same for 64-bit busy TSS[1]. The limit needs manual restoration if the host has something else.
2415 * Task switching is not supported in 64-bit mode[2], but the limit still matters as IOPM is supported in 64-bit mode.
2416 * Restoring the limit lazily while returning to ring-3 is safe because IOPM is not applicable in ring-0.
2417 *
2418 * [1] See Intel spec. 3.5 "System Descriptor Types".
2419 * [2] See Intel spec. 7.2.3 "TSS Descriptor in 64-bit mode".
2420 */
2421 Assert(pDesc->System.u4Type == 11);
2422 if ( pDesc->System.u16LimitLow != 0x67
2423 || pDesc->System.u4LimitHigh)
2424 {
2425 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_SEL_TR;
2426 pVCpu->hm.s.vmx.RestoreHost.uHostSelTR = uSelTR;
2427
2428 /* Store the GDTR here as we need it while restoring TR. */
2429 memcpy(&pVCpu->hm.s.vmx.RestoreHost.HostGdtr, &Gdtr, sizeof(X86XDTR64));
2430 }
2431#else
2432 uTRBase = X86DESC_BASE(pDesc);
2433#endif
2434 rc = VMXWriteVmcsHstN(VMX_VMCS_HOST_TR_BASE, uTRBase);
2435 }
2436 AssertRCReturn(rc, rc);
2437
2438 /*
2439 * Host FS base and GS base.
2440 */
2441#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2442 if (HMVMX_IS_64BIT_HOST_MODE())
2443 {
2444 uint64_t u64FSBase = ASMRdMsr(MSR_K8_FS_BASE);
2445 uint64_t u64GSBase = ASMRdMsr(MSR_K8_GS_BASE);
2446 rc = VMXWriteVmcs64(VMX_VMCS_HOST_FS_BASE, u64FSBase); AssertRCReturn(rc, rc);
2447 rc = VMXWriteVmcs64(VMX_VMCS_HOST_GS_BASE, u64GSBase); AssertRCReturn(rc, rc);
2448
2449# if HC_ARCH_BITS == 64
2450 /* Store the base if we have to restore FS or GS manually as we need to restore the base as well. */
2451 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_FS)
2452 pVCpu->hm.s.vmx.RestoreHost.uHostFSBase = u64FSBase;
2453 if (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_SEL_GS)
2454 pVCpu->hm.s.vmx.RestoreHost.uHostGSBase = u64GSBase;
2455# endif
2456 }
2457#endif
2458 return rc;
2459}
2460
2461
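/*
 * Illustrative sketch (disabled, not part of the original sources): how X86DESC64_BASE
 * assembles the TR base used above from the raw descriptor bytes ((uSelTR & X86_SEL_MASK)
 * being the byte offset into the GDT). A simplified, hypothetical re-implementation for
 * documentation; system descriptors in long mode are 16 bytes and carry the upper 32 base
 * bits in bytes 8..11.
 */
#if 0
static uint64_t vmxExampleDescBase64(const uint8_t *pbDesc)
{
    uint64_t uBase = (uint64_t)pbDesc[2]
                   | ((uint64_t)pbDesc[3] << 8)    /* Base bits 15:0 live in bytes 2..3. */
                   | ((uint64_t)pbDesc[4] << 16)   /* Base bits 23:16. */
                   | ((uint64_t)pbDesc[7] << 24);  /* Base bits 31:24. */
    uBase |= (uint64_t)pbDesc[8]  << 32            /* 64-bit system descriptors only: */
           | (uint64_t)pbDesc[9]  << 40            /*   bits 63:32 in bytes 8..11. */
           | (uint64_t)pbDesc[10] << 48
           | (uint64_t)pbDesc[11] << 56;
    return uBase;
}
#endif

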
2462/**
2463 * Saves certain host MSRs in the VM-Exit MSR-load area and some in the
2464 * host-state area of the VMCS. These MSRs will be automatically restored on
2465 * the host after every successful VM exit.
2466 *
2467 * @returns VBox status code.
2468 * @param pVM Pointer to the VM.
2469 * @param pVCpu Pointer to the VMCPU.
2470 */
2471DECLINLINE(int) hmR0VmxSaveHostMsrs(PVM pVM, PVMCPU pVCpu)
2472{
2473 AssertPtr(pVCpu);
2474 AssertPtr(pVCpu->hm.s.vmx.pvHostMsr);
2475
2476 int rc = VINF_SUCCESS;
2477#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
2478 PVMXMSR pHostMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvHostMsr;
2479 uint32_t cHostMsrs = 0;
2480 uint32_t u32HostExtFeatures = pVM->hm.s.cpuid.u32AMDFeatureEDX;
2481
2482 if (u32HostExtFeatures & (X86_CPUID_EXT_FEATURE_EDX_NX | X86_CPUID_EXT_FEATURE_EDX_LONG_MODE))
2483 {
2484 uint64_t u64HostEfer = ASMRdMsr(MSR_K6_EFER);
2485
2486# if HC_ARCH_BITS == 64
2487 /* Paranoia. 64-bit code requires these bits to be set always. */
2488 Assert((u64HostEfer & (MSR_K6_EFER_LMA | MSR_K6_EFER_LME)) == (MSR_K6_EFER_LMA | MSR_K6_EFER_LME));
2489
2490 /*
2491     * We currently do not save/restore host EFER; we just make sure it doesn't get modified by VT-x operation.
2492     * All guest accesses (read, write) to EFER cause VM-exits. If we ever need to conditionally load the guest EFER
2493     * for some reason (e.g. to allow transparent reads) we would activate the code below.
2494 */
2495# if 0
2496 /* All our supported 64-bit host platforms must have NXE bit set. Otherwise we can change the below code to save EFER. */
2497 Assert(u64HostEfer & (MSR_K6_EFER_NXE));
2498 /* The SCE bit is only applicable in 64-bit mode. Save EFER if it doesn't match what the guest has.
2499 See Intel spec. 30.10.4.3 "Handling the SYSCALL and SYSRET Instructions". */
2500 if (CPUMIsGuestInLongMode(pVCpu))
2501 {
2502 uint64_t u64GuestEfer;
2503 rc = CPUMQueryGuestMsr(pVCpu, MSR_K6_EFER, &u64GuestEfer);
2504 AssertRC(rc);
2505
2506 if ((u64HostEfer & MSR_K6_EFER_SCE) != (u64GuestEfer & MSR_K6_EFER_SCE))
2507 {
2508 pHostMsr->u32IndexMSR = MSR_K6_EFER;
2509 pHostMsr->u32Reserved = 0;
2510 pHostMsr->u64Value = u64HostEfer;
2511 pHostMsr++; cHostMsrs++;
2512 }
2513 }
2514# endif
2515# else /* HC_ARCH_BITS != 64 */
2516 pHostMsr->u32IndexMSR = MSR_K6_EFER;
2517 pHostMsr->u32Reserved = 0;
2518# if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2519 if (CPUMIsGuestInLongMode(pVCpu))
2520 {
2521 /* Must match the EFER value in our 64 bits switcher. */
2522 pHostMsr->u64Value = u64HostEfer | MSR_K6_EFER_LME | MSR_K6_EFER_SCE | MSR_K6_EFER_NXE;
2523 }
2524 else
2525# endif
2526 pHostMsr->u64Value = u64HostEfer;
2527 pHostMsr++; cHostMsrs++;
2528# endif /* HC_ARCH_BITS == 64 */
2529 }
2530
2531# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2532 if (HMVMX_IS_64BIT_HOST_MODE())
2533 {
2534 pHostMsr->u32IndexMSR = MSR_K6_STAR;
2535 pHostMsr->u32Reserved = 0;
2536 pHostMsr->u64Value = ASMRdMsr(MSR_K6_STAR); /* legacy syscall eip, cs & ss */
2537 pHostMsr++; cHostMsrs++;
2538 pHostMsr->u32IndexMSR = MSR_K8_LSTAR;
2539 pHostMsr->u32Reserved = 0;
2540 pHostMsr->u64Value = ASMRdMsr(MSR_K8_LSTAR); /* 64-bit mode syscall rip */
2541 pHostMsr++; cHostMsrs++;
2542 pHostMsr->u32IndexMSR = MSR_K8_SF_MASK;
2543 pHostMsr->u32Reserved = 0;
2544 pHostMsr->u64Value = ASMRdMsr(MSR_K8_SF_MASK); /* syscall flag mask */
2545 pHostMsr++; cHostMsrs++;
2546 pHostMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
2547 pHostMsr->u32Reserved = 0;
2548 pHostMsr->u64Value = ASMRdMsr(MSR_K8_KERNEL_GS_BASE); /* swapgs exchange value */
2549 pHostMsr++; cHostMsrs++;
2550 }
2551# endif
2552
2553    /* Shouldn't ever happen, but there -is- an upper limit. We're well within the recommended maximum of 512. */
2554 if (RT_UNLIKELY(cHostMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)))
2555 {
2556 LogRel(("cHostMsrs=%u Cpu=%u\n", cHostMsrs, (unsigned)MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc)));
2557 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_HOST_MSR_STORAGE;
2558 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2559 }
2560
2561 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, cHostMsrs);
2562#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
2563
2564 /*
2565 * Host Sysenter MSRs.
2566 */
2567 rc = VMXWriteVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, ASMRdMsr_Low(MSR_IA32_SYSENTER_CS));
2568 AssertRCReturn(rc, rc);
2569#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
2570 if (HMVMX_IS_64BIT_HOST_MODE())
2571 {
2572 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2573 AssertRCReturn(rc, rc);
2574 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2575 }
2576 else
2577 {
2578 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2579 AssertRCReturn(rc, rc);
2580 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2581 }
2582#elif HC_ARCH_BITS == 32
2583 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr_Low(MSR_IA32_SYSENTER_ESP));
2584 AssertRCReturn(rc, rc);
2585 rc = VMXWriteVmcs32(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr_Low(MSR_IA32_SYSENTER_EIP));
2586#else
2587 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, ASMRdMsr(MSR_IA32_SYSENTER_ESP));
2588 AssertRCReturn(rc, rc);
2589 rc = VMXWriteVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, ASMRdMsr(MSR_IA32_SYSENTER_EIP));
2590#endif
2591 AssertRCReturn(rc, rc);
2592
2593 /** @todo IA32_PERF_GLOBALCTRL, IA32_PAT, IA32_EFER, also see
2594 * hmR0VmxSetupExitCtls() !! */
2595 return rc;
2596}
2597
2598
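/*
 * Illustrative sketch (disabled, not part of the original sources): the auto-load/store MSR
 * area filled in above is just an array of {index, reserved, value} records; the VMCS count
 * fields tell the CPU how many records to process on entry/exit. The helper below shows one
 * record being appended - a hypothetical, simplified stand-in for the open-coded sequence
 * above.
 */
#if 0
static uint32_t vmxExampleAddAutoMsr(PVMXMSR paMsrs, uint32_t cMsrs, uint32_t idMsr, uint64_t uValue)
{
    paMsrs[cMsrs].u32IndexMSR = idMsr;   /* Which MSR to load/store. */
    paMsrs[cMsrs].u32Reserved = 0;       /* Must be zero. */
    paMsrs[cMsrs].u64Value    = uValue;  /* The value to load (or the slot to store into). */
    return cMsrs + 1;                    /* New count, to be written to the VMCS count field. */
}
#endif

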
2599/**
2600 * Sets up VM-entry controls in the VMCS. These controls can affect things done
2601 * on VM-exit; e.g. "load debug controls", see Intel spec. 24.8.1 "VM-entry
2602 * controls".
2603 *
2604 * @returns VBox status code.
2605 * @param pVCpu Pointer to the VMCPU.
2606 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2607 * out-of-sync. Make sure to update the required fields
2608 * before using them.
2609 *
2610 * @remarks No-long-jump zone!!!
2611 */
2612DECLINLINE(int) hmR0VmxLoadGuestEntryCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2613{
2614 int rc = VINF_SUCCESS;
2615 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_ENTRY_CTLS)
2616 {
2617 PVM pVM = pVCpu->CTX_SUFF(pVM);
2618 uint32_t val = pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0; /* Bits set here must be set in the VMCS. */
2619 uint32_t zap = pVM->hm.s.vmx.msr.vmx_entry.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2620
2621        /* Load debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x capable CPUs only supported the 1-setting of this bit. */
2622 val |= VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG;
2623
2624 /* Set if the guest is in long mode. This will set/clear the EFER.LMA bit on VM-entry. */
2625 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2626 val |= VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST;
2627 else
2628 Assert(!(val & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST));
2629
2630 /*
2631 * The following should not be set (since we're not in SMM mode):
2632 * - VMX_VMCS_CTRL_ENTRY_ENTRY_SMM
2633 * - VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON
2634 */
2635
2636 /** @todo VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR,
2637 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR,
2638 * VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR */
2639
2640 if ((val & zap) != val)
2641 {
2642 LogRel(("hmR0VmxLoadGuestEntryCtls: invalid VM-entry controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2643 pVM->hm.s.vmx.msr.vmx_entry.n.disallowed0, val, zap));
2644 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_ENTRY;
2645 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2646 }
2647
2648 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY, val);
2649 AssertRCReturn(rc, rc);
2650
2651        /* Update VCPU with the currently set VM-entry controls. */
2652 pVCpu->hm.s.vmx.u32EntryCtls = val;
2653 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_ENTRY_CTLS;
2654 }
2655 return rc;
2656}
2657
2658
2659/**
2660 * Sets up the VM-exit controls in the VMCS.
2661 *
2662 * @returns VBox status code.
2663 * @param pVM Pointer to the VM.
2664 * @param pVCpu Pointer to the VMCPU.
2665 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2666 * out-of-sync. Make sure to update the required fields
2667 * before using them.
2668 *
2669 * @remarks requires EFER.
2670 */
2671DECLINLINE(int) hmR0VmxLoadGuestExitCtls(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2672{
2673 int rc = VINF_SUCCESS;
2674 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_EXIT_CTLS)
2675 {
2676 PVM pVM = pVCpu->CTX_SUFF(pVM);
2677 uint32_t val = pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0; /* Bits set here must be set in the VMCS. */
2678 uint32_t zap = pVM->hm.s.vmx.msr.vmx_exit.n.allowed1; /* Bits cleared here must be cleared in the VMCS. */
2679
2680 /* Save debug controls (DR7 & IA32_DEBUGCTL_MSR). The first VT-x CPUs only supported the 1-setting of this bit. */
2681 val |= VMX_VMCS_CTRL_EXIT_SAVE_DEBUG;
2682
2683 /*
2684 * Set the host long mode active (EFER.LMA) bit (which Intel calls "Host address-space size") if necessary.
2685 * On VM-exit, VT-x sets both the host EFER.LMA and EFER.LME bit to this value. See assertion in hmR0VmxSaveHostMsrs().
2686 */
2687#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
2688 if (HMVMX_IS_64BIT_HOST_MODE())
2689 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE;
2690 else
2691 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
2692#elif HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS)
2693 if (CPUMIsGuestInLongModeEx(pMixedCtx))
2694 val |= VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE; /* The switcher goes to long mode. */
2695 else
2696 Assert(!(val & VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE));
2697#endif
2698
2699 /* Don't acknowledge external interrupts on VM-exit. We want to let the host do that. */
2700 Assert(!(val & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT));
2701
2702 /** @todo VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR,
2703 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR,
2704 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR,
2705 * VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR,
2706 * VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR. */
2707
2708 if (pVM->hm.s.vmx.msr.vmx_exit.n.allowed1 & VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER)
2709 val |= VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER;
2710
2711 if ((val & zap) != val)
2712 {
2713            LogRel(("hmR0VmxLoadGuestExitCtls: invalid VM-exit controls combo! cpu=%RX64 val=%RX64 zap=%RX64\n",
2714 pVM->hm.s.vmx.msr.vmx_exit.n.disallowed0, val, zap));
2715 pVCpu->hm.s.u32HMError = VMX_UFC_CTRL_EXIT;
2716 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
2717 }
2718
2719 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT, val);
2720 AssertRCReturn(rc, rc);
2721
2722 /* Update VCPU with the currently set VM-exit controls. */
2723 pVCpu->hm.s.vmx.u32ExitCtls = val;
2724 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_EXIT_CTLS;
2725 }
2726 return rc;
2727}
2728
2729
2730/**
2731 * Loads the guest APIC and related state.
2732 *
2733 * @returns VBox status code.
2734 * @param pVM Pointer to the VM.
2735 * @param pVCpu Pointer to the VMCPU.
2736 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2737 * out-of-sync. Make sure to update the required fields
2738 * before using them.
2739 */
2740DECLINLINE(int) hmR0VmxLoadGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2741{
2742 int rc = VINF_SUCCESS;
2743 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_APIC_STATE)
2744 {
2745 /* Setup TPR shadowing. Also setup TPR patching for 32-bit guests. */
2746 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
2747 {
2748 Assert(pVCpu->hm.s.vmx.HCPhysVirtApic);
2749
2750 bool fPendingIntr = false;
2751 uint8_t u8Tpr = 0;
2752 uint8_t u8PendingIntr = 0;
2753 rc = PDMApicGetTPR(pVCpu, &u8Tpr, &fPendingIntr, &u8PendingIntr);
2754 AssertRCReturn(rc, rc);
2755
2756 /*
2757 * If there are external interrupts pending but masked by the TPR value, instruct VT-x to cause a VM-exit when
2758 * the guest lowers its TPR below the highest-priority pending interrupt and we can deliver the interrupt.
2759 * If there are no external interrupts pending, set threshold to 0 to not cause a VM-exit. We will eventually deliver
2760 * the interrupt when we VM-exit for other reasons.
2761 */
2762 pVCpu->hm.s.vmx.pbVirtApic[0x80] = u8Tpr; /* Offset 0x80 is TPR in the APIC MMIO range. */
2763 uint32_t u32TprThreshold = 0;
2764 if (fPendingIntr)
2765 {
2766 /* Bits 3-0 of the TPR threshold field correspond to bits 7-4 of the TPR (which is the Task-Priority Class). */
2767 const uint8_t u8PendingPriority = (u8PendingIntr >> 4);
2768 const uint8_t u8TprPriority = (u8Tpr >> 4) & 7;
2769 if (u8PendingPriority <= u8TprPriority)
2770 u32TprThreshold = u8PendingPriority;
2771 else
2772 u32TprThreshold = u8TprPriority; /* Required for Vista 64-bit guest, see @bugref{6398}. */
2773 }
2774 Assert(!(u32TprThreshold & 0xfffffff0)); /* Bits 31:4 MBZ. */
2775
2776 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, u32TprThreshold);
2777 AssertRCReturn(rc, rc);
2778 }
2779
2780 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_APIC_STATE;
2781 }
2782 return rc;
2783}
2784
2785
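/*
 * Illustrative sketch (disabled, not part of the original sources): the TPR-threshold
 * computation from above as a pure function. Bits 3:0 of the threshold correspond to bits
 * 7:4 of the TPR (the task-priority class); the threshold is capped at the TPR's own
 * priority so the guest traps when lowering TPR below the pending interrupt. The helper
 * name is hypothetical.
 */
#if 0
static uint32_t vmxExampleTprThreshold(uint8_t u8Tpr, uint8_t u8PendingIntr, bool fPendingIntr)
{
    if (!fPendingIntr)
        return 0;                                    /* Nothing pending: never exit on TPR writes. */
    uint8_t const u8PendingPriority = u8PendingIntr >> 4;
    uint8_t const u8TprPriority     = (u8Tpr >> 4) & 7;
    return RT_MIN(u8PendingPriority, u8TprPriority); /* See @bugref{6398} for the cap. */
}
#endif

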
2786/**
2787 * Gets the guest's interruptibility-state ("interrupt shadow" as AMD calls it).
2788 *
2789 * @returns Guest's interruptibility-state.
2790 * @param pVCpu Pointer to the VMCPU.
2791 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2792 * out-of-sync. Make sure to update the required fields
2793 * before using them.
2794 *
2795 * @remarks No-long-jump zone!!!
2796 * @remarks Has side-effects with VMCPU_FF_INHIBIT_INTERRUPTS force-flag.
2797 */
2798DECLINLINE(uint32_t) hmR0VmxGetGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2799{
2800 /*
2801 * Instructions like STI and MOV SS inhibit interrupts till the next instruction completes. Check if we should
2802 * inhibit interrupts or clear any existing interrupt-inhibition.
2803 */
2804 uint32_t uIntrState = 0;
2805 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS))
2806 {
2807 /* If inhibition is active, RIP & RFLAGS should've been accessed (i.e. read previously from the VMCS or from ring-3). */
2808 AssertMsg((pVCpu->hm.s.vmx.fUpdatedGuestState & (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS))
2809 == (HMVMX_UPDATED_GUEST_RIP | HMVMX_UPDATED_GUEST_RFLAGS), ("%#x\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
2810 if (pMixedCtx->rip != EMGetInhibitInterruptsPC(pVCpu))
2811 {
2812 /*
2813 * We can clear the inhibit force flag as even if we go back to the recompiler without executing guest code in
2814 * VT-x, the flag's condition to be cleared is met and thus the cleared state is correct.
2815 */
2816 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
2817 }
2818 else if (pMixedCtx->eflags.Bits.u1IF)
2819 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
2820 else
2821 uIntrState = VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS;
2822 }
2823 return uIntrState;
2824}
2825
2826
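/*
 * Illustrative sketch (disabled, not part of the original sources): the two
 * interruptibility-state encodings returned above map to bits 0 and 1 of the VMCS field
 * (STI-blocking and MOV-SS-blocking respectively; the Intel spec also defines SMI- and
 * NMI-blocking in bits 2 and 3, which are not produced here). The helper name is
 * hypothetical.
 */
#if 0
static bool vmxExampleIsInIntrShadow(uint32_t uIntrState)
{
    return (uIntrState & (  VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
                          | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)) != 0;
}
#endif

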
2827/**
2828 * Loads the guest's interruptibility-state into the guest-state area in the
2829 * VMCS.
2830 *
2831 * @returns VBox status code.
2832 * @param pVCpu Pointer to the VMCPU.
2833 * @param uIntrState The interruptibility-state to set.
2834 */
2835static int hmR0VmxLoadGuestIntrState(PVMCPU pVCpu, uint32_t uIntrState)
2836{
2837 AssertMsg(!(uIntrState & 0xfffffff0), ("%#x\n", uIntrState)); /* Bits 31:4 MBZ. */
2838 Assert((uIntrState & 0x3) != 0x3); /* Block-by-STI and MOV SS cannot be simultaneously set. */
2839 int rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, uIntrState);
2840 AssertRCReturn(rc, rc);
2841 return rc;
2842}
2843
2844
2845/**
2846 * Loads the guest's RIP into the guest-state area in the VMCS.
2847 *
2848 * @returns VBox status code.
2849 * @param pVCpu Pointer to the VMCPU.
2850 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2851 * out-of-sync. Make sure to update the required fields
2852 * before using them.
2853 *
2854 * @remarks No-long-jump zone!!!
2855 */
2856static int hmR0VmxLoadGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2857{
2858 int rc = VINF_SUCCESS;
2859 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RIP)
2860 {
2861 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RIP, pMixedCtx->rip);
2862 AssertRCReturn(rc, rc);
2863 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RIP;
2864 Log4(("Load: VMX_VMCS_GUEST_RIP=%#RX64 fContextUseFlags=%#x\n", pMixedCtx->rip, pVCpu->hm.s.fContextUseFlags));
2865 }
2866 return rc;
2867}
2868
2869
2870/**
2871 * Loads the guest's RSP into the guest-state area in the VMCS.
2872 *
2873 * @returns VBox status code.
2874 * @param pVCpu Pointer to the VMCPU.
2875 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2876 * out-of-sync. Make sure to update the required fields
2877 * before using them.
2878 *
2879 * @remarks No-long-jump zone!!!
2880 */
2881static int hmR0VmxLoadGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2882{
2883 int rc = VINF_SUCCESS;
2884 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RSP)
2885 {
2886 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_RSP, pMixedCtx->rsp);
2887 AssertRCReturn(rc, rc);
2888 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RSP;
2889 Log4(("Load: VMX_VMCS_GUEST_RSP=%#RX64\n", pMixedCtx->rsp));
2890 }
2891 return rc;
2892}
2893
2894
2895/**
2896 * Loads the guest's RFLAGS into the guest-state area in the VMCS.
2897 *
2898 * @returns VBox status code.
2899 * @param pVCpu Pointer to the VMCPU.
2900 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2901 * out-of-sync. Make sure to update the required fields
2902 * before using them.
2903 *
2904 * @remarks No-long-jump zone!!!
2905 */
2906static int hmR0VmxLoadGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2907{
2908 int rc = VINF_SUCCESS;
2909 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
2910 {
2911 /* Intel spec. 2.3.1 "System Flags and Fields in IA-32e Mode" claims the upper 32-bits of RFLAGS are reserved (MBZ).
2912 Let us assert it as such and use 32-bit VMWRITE. */
2913 Assert(!(pMixedCtx->rflags.u64 >> 32));
2914 X86EFLAGS Eflags = pMixedCtx->eflags;
2915 Eflags.u32 &= VMX_EFLAGS_RESERVED_0; /* Bits 22-31, 15, 5 & 3 MBZ. */
2916 Eflags.u32 |= VMX_EFLAGS_RESERVED_1; /* Bit 1 MB1. */
2917
2918 /*
2919 * If we're emulating real-mode using Virtual 8086 mode, save the real-mode eflags so we can restore them on VM exit.
2920 * Modify the real-mode guest's eflags so that VT-x can run the real-mode guest code under Virtual 8086 mode.
2921 */
2922 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
2923 {
2924 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
2925 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
2926 pVCpu->hm.s.vmx.RealMode.Eflags.u32 = Eflags.u32; /* Save the original eflags of the real-mode guest. */
2927 Eflags.Bits.u1VM = 1; /* Set the Virtual 8086 mode bit. */
2928 Eflags.Bits.u2IOPL = 0; /* Change IOPL to 0, otherwise certain instructions won't fault. */
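/* In V86 mode with IOPL=0, the IOPL-sensitive instructions (CLI, STI, PUSHF, POPF, INT n, IRET)
   raise #GP, giving us the chance to trap and emulate them. */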
2929 }
2930
2931 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_RFLAGS, Eflags.u32);
2932 AssertRCReturn(rc, rc);
2933
2934 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_RFLAGS;
2935 Log4(("Load: VMX_VMCS_GUEST_RFLAGS=%#RX32\n", Eflags.u32));
2936 }
2937 return rc;
2938}
2939
2940
2941/**
2942 * Loads the guest RIP, RSP and RFLAGS into the guest-state area in the VMCS.
2943 *
2944 * @returns VBox status code.
2945 * @param pVCpu Pointer to the VMCPU.
2946 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2947 * out-of-sync. Make sure to update the required fields
2948 * before using them.
2949 *
2950 * @remarks No-long-jump zone!!!
2951 */
2952DECLINLINE(int) hmR0VmxLoadGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2953{
2954 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
2955 AssertRCReturn(rc, rc);
2956 rc = hmR0VmxLoadGuestRsp(pVCpu, pMixedCtx);
2957 AssertRCReturn(rc, rc);
2958 rc = hmR0VmxLoadGuestRflags(pVCpu, pMixedCtx);
2959 AssertRCReturn(rc, rc);
2960 return rc;
2961}
2962
2963
2964/**
2965 * Loads the guest CR0 control register into the guest-state area in the VMCS.
2966 * CR0 is partially shared with the host and we have to consider the FPU bits.
2967 *
2968 * @returns VBox status code.
2969 * @param pVM Pointer to the VM.
2970 * @param pVCpu Pointer to the VMCPU.
2971 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
2972 * out-of-sync. Make sure to update the required fields
2973 * before using them.
2974 *
2975 * @remarks No-long-jump zone!!!
2976 */
2977static int hmR0VmxLoadSharedCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
2978{
2979 /*
2980 * Guest CR0.
2981 * Guest FPU.
2982 */
2983 int rc = VINF_SUCCESS;
2984 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
2985 {
2986 Assert(!(pMixedCtx->cr0 >> 32));
2987 uint32_t u32GuestCR0 = pMixedCtx->cr0;
2988 PVM pVM = pVCpu->CTX_SUFF(pVM);
2989
2990 /* The guest's view (read access) of its CR0 is unblemished. */
2991 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, u32GuestCR0);
2992 AssertRCReturn(rc, rc);
2993 Log4(("Load: VMX_VMCS_CTRL_CR0_READ_SHADOW=%#RX32\n", u32GuestCR0));
2994
2995 /* Setup VT-x's view of the guest CR0. */
2996 /* Minimize VM-exits due to CR3 changes when we have NestedPaging. */
2997 if (pVM->hm.s.fNestedPaging)
2998 {
2999 if (CPUMIsGuestPagingEnabledEx(pMixedCtx))
3000 {
3001 /* The guest has paging enabled, let it access CR3 without causing a VM exit if supported. */
3002 pVCpu->hm.s.vmx.u32ProcCtls &= ~( VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3003 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT);
3004 }
3005 else
3006 {
3007 /* The guest doesn't have paging enabled; make CR3 accesses cause VM-exits so we can update our shadow. */
3008 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT
3009 | VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3010 }
3011
3012 /* If we have unrestricted guest execution, we never have to intercept CR3 reads. */
3013 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3014 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT;
3015
3016 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3017 AssertRCReturn(rc, rc);
3018 }
3019 else
3020 u32GuestCR0 |= X86_CR0_WP; /* Guest CPL 0 writes to its read-only pages should cause a #PF VM-exit. */
3021
3022 /*
3023 * Guest FPU bits.
3024 * Intel spec. 23.8 "Restrictions on VMX operation" mentions that the CR0.NE bit must always be set on the first
3025 * CPUs to support VT-x; there is no mention of relaxing this for UX in the VM-entry checks.
3026 */
3027 u32GuestCR0 |= X86_CR0_NE;
3028 bool fInterceptNM = false;
3029 if (CPUMIsGuestFPUStateActive(pVCpu))
3030 {
3031 fInterceptNM = false; /* Guest FPU active, no need to VM-exit on #NM. */
3032 /* The guest should still get #NM exceptions when it expects them, so we should not clear the TS & MP bits here.
3033 We're only concerned about -us- not intercepting #NMs when the guest-FPU state is active. Not the guest itself! */
3034 }
3035 else
3036 {
3037 fInterceptNM = true; /* Guest FPU inactive, VM-exit on #NM for lazy FPU loading. */
3038 u32GuestCR0 |= X86_CR0_TS /* Guest can task switch quickly and do lazy FPU syncing. */
3039 | X86_CR0_MP; /* FWAIT/WAIT should not ignore CR0.TS and should generate #NM. */
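/* Net effect: the first FPU/WAIT instruction the guest executes raises #NM, which we intercept
   (fInterceptNM below) to load the guest-FPU state on demand. */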
3040 }
3041
3042 /* Catch floating point exceptions if we need to report them to the guest in a different way. */
3043 bool fInterceptMF = false;
3044 if (!(pMixedCtx->cr0 & X86_CR0_NE))
3045 fInterceptMF = true;
3046
3047 /* Finally, intercept all exceptions as we cannot directly inject them in real-mode, see hmR0VmxInjectEventVmcs(). */
3048 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3049 {
3050 Assert(PDMVmmDevHeapIsEnabled(pVM));
3051 Assert(pVM->hm.s.vmx.pRealModeTSS);
3052 pVCpu->hm.s.vmx.u32XcptBitmap |= HMVMX_REAL_MODE_XCPT_MASK;
3053 fInterceptNM = true;
3054 fInterceptMF = true;
3055 }
3056 else
3057 pVCpu->hm.s.vmx.u32XcptBitmap &= ~HMVMX_REAL_MODE_XCPT_MASK;
3058
3059 if (fInterceptNM)
3060 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_NM);
3061 else
3062 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_NM);
3063
3064 if (fInterceptMF)
3065 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_MF);
3066 else
3067 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_MF);
3068
3069 /* Additional intercepts for debugging, define these yourself explicitly. */
3070#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3071 pVCpu->hm.s.vmx.u32XcptBitmap |= 0
3072 | RT_BIT(X86_XCPT_BP)
3073 | RT_BIT(X86_XCPT_DB)
3074 | RT_BIT(X86_XCPT_DE)
3075 | RT_BIT(X86_XCPT_NM)
3076 | RT_BIT(X86_XCPT_UD)
3077 | RT_BIT(X86_XCPT_NP)
3078 | RT_BIT(X86_XCPT_SS)
3079 | RT_BIT(X86_XCPT_GP)
3080 | RT_BIT(X86_XCPT_PF)
3081 | RT_BIT(X86_XCPT_MF)
3082 ;
3083#elif defined(HMVMX_ALWAYS_TRAP_PF)
3084 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_PF);
3085#endif
3086
3087 Assert(pVM->hm.s.fNestedPaging || (pVCpu->hm.s.vmx.u32XcptBitmap & RT_BIT(X86_XCPT_PF)));
3088
3089 /* Set/clear the CR0 specific bits along with their exceptions (PE, PG, CD, NW). */
3090 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 & pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
3091 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 | pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
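/* Semantics of the fixed CR0 MSRs: a 1 in vmx_cr0_fixed0 means that CR0 bit must be 1, a 0 in
   vmx_cr0_fixed1 means it must be 0. Hence uSetCR0 (fixed0 & fixed1) holds the must-be-one bits
   and uZapCR0 (fixed0 | fixed1) the only bits that may be one. */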
3092 if (pVM->hm.s.vmx.fUnrestrictedGuest) /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG). */
3093 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
3094 else
3095 Assert((uSetCR0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG));
3096
3097 u32GuestCR0 |= uSetCR0;
3098 u32GuestCR0 &= uZapCR0;
3099 u32GuestCR0 &= ~(X86_CR0_CD | X86_CR0_NW); /* Always enable caching. */
3100
3101 /* Write VT-x's view of the guest CR0 into the VMCS and update the exception bitmap. */
3102 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR0, u32GuestCR0);
3103 AssertRCReturn(rc, rc);
3104 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3105 AssertRCReturn(rc, rc);
3106 Log4(("Load: VMX_VMCS_GUEST_CR0=%#RX32 (uSetCR0=%#RX32 uZapCR0=%#RX32)\n", u32GuestCR0, uSetCR0, uZapCR0));
3107
3108 /*
3109 * CR0 is shared between host and guest along with a CR0 read shadow. Therefore, certain bits must not be changed
3110 * by the guest because VT-x ignores saving/restoring them (namely CD, ET, NW) and for certain other bits
3111 * we want to be notified immediately of guest CR0 changes (e.g. PG to update our shadow page tables).
3112 */
3113 uint32_t u32CR0Mask = 0;
3114 u32CR0Mask = X86_CR0_PE
3115 | X86_CR0_NE
3116 | X86_CR0_WP
3117 | X86_CR0_PG
3118 | X86_CR0_ET /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.ET */
3119 | X86_CR0_CD /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.CD */
3120 | X86_CR0_NW; /* Bit ignored on VM-entry and VM-exit. Don't let the guest modify the host CR0.NW */
3121 if (pVM->hm.s.vmx.fUnrestrictedGuest)
3122 u32CR0Mask &= ~X86_CR0_PE;
3123 if (pVM->hm.s.fNestedPaging)
3124 u32CR0Mask &= ~X86_CR0_WP;
3125
3126 /* If the guest FPU state is active, we don't need to VM-exit on writes to FPU-related bits in CR0. */
3127 if (fInterceptNM)
3128 u32CR0Mask |= (X86_CR0_TS | X86_CR0_MP);
3129 else
3130 u32CR0Mask &= ~(X86_CR0_TS | X86_CR0_MP);
3131
3132 /* Write the CR0 mask into the VMCS and update the VCPU's copy of the current CR0 mask. */
3133 pVCpu->hm.s.vmx.u32CR0Mask = u32CR0Mask;
3134 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR0_MASK, u32CR0Mask);
3135 AssertRCReturn(rc, rc);
3136
3137 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR0;
3138 }
3139 return rc;
3140}
3141
3142
3143/**
3144 * Loads the guest control registers (CR3, CR4) into the guest-state area
3145 * in the VMCS.
3146 *
3147 * @returns VBox status code.
3148 * @param pVM Pointer to the VM.
3149 * @param pVCpu Pointer to the VMCPU.
3150 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3151 * out-of-sync. Make sure to update the required fields
3152 * before using them.
3153 *
3154 * @remarks No-long-jump zone!!!
3155 */
3156static int hmR0VmxLoadGuestCR3AndCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3157{
3158 int rc = VINF_SUCCESS;
3159 PVM pVM = pVCpu->CTX_SUFF(pVM);
3160
3161 /*
3162 * Guest CR2.
3163 * It's always loaded in the assembler code. Nothing to do here.
3164 */
3165
3166 /*
3167 * Guest CR3.
3168 */
3169 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR3)
3170 {
3171 RTGCPHYS GCPhysGuestCR3 = NIL_RTGCPHYS;
3172 if (pVM->hm.s.fNestedPaging)
3173 {
3174 pVCpu->hm.s.vmx.HCPhysEPTP = PGMGetHyperCR3(pVCpu);
3175
3176 /* Validate. See Intel spec. 28.2.2 "EPT Translation Mechanism" and 24.6.11 "Extended-Page-Table Pointer (EPTP)" */
3177 Assert(pVCpu->hm.s.vmx.HCPhysEPTP);
3178 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & UINT64_C(0xfff0000000000000)));
3179 Assert(!(pVCpu->hm.s.vmx.HCPhysEPTP & 0xfff));
3180
3181 /* VMX_EPT_MEMTYPE_WB support is already checked in hmR0VmxSetupTaggedTlb(). */
3182 pVCpu->hm.s.vmx.HCPhysEPTP |= VMX_EPT_MEMTYPE_WB
3183 | (VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT);
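/* Resulting EPTP layout: bits 2:0 = memory type (6 = write-back), bits 5:3 = EPT page-walk length
   minus 1 (3 for a 4-level walk), bits 11:6 = MBZ (asserted below), upper bits = address of the EPT PML4 table. */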
3184
3185 /* Validate. See Intel spec. 26.2.1 "Checks on VMX Controls" */
3186 AssertMsg( ((pVCpu->hm.s.vmx.HCPhysEPTP >> 3) & 0x07) == 3 /* Bits 5:3 (EPT page-walk length - 1) must be 3. */
3187 && ((pVCpu->hm.s.vmx.HCPhysEPTP >> 6) & 0x3f) == 0, /* Bits 11:6 MBZ. */
3188 ("EPTP %#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3189
3190 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, pVCpu->hm.s.vmx.HCPhysEPTP);
3191 AssertRCReturn(rc, rc);
3192 Log4(("Load: VMX_VMCS64_CTRL_EPTP_FULL=%#RX64\n", pVCpu->hm.s.vmx.HCPhysEPTP));
3193
3194 if ( pVM->hm.s.vmx.fUnrestrictedGuest
3195 || CPUMIsGuestPagingEnabledEx(pMixedCtx))
3196 {
3197 /* If the guest is in PAE mode, pass the PDPEs to VT-x using the VMCS fields. */
3198 if (CPUMIsGuestInPAEModeEx(pMixedCtx))
3199 {
3200 rc = PGMGstGetPaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]); AssertRCReturn(rc, rc);
3201 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
3202 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
3203 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
3204 rc = VMXWriteVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
3205 }
3206
3207 /* The guest's view of its CR3 is unblemished with Nested Paging when the guest is using paging or we
3208 have Unrestricted Execution to handle the guest when it's not using paging. */
3209 GCPhysGuestCR3 = pMixedCtx->cr3;
3210 }
3211 else
3212 {
3213 /*
3214 * The guest is not using paging, but the CPU (VT-x) has to. While the guest thinks it accesses physical memory
3215 * directly, we use our identity-mapped page table to map guest-linear to guest-physical addresses.
3216 * EPT takes care of translating it to host-physical addresses.
3217 */
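/* E.g. a guest access to linear address 0x1000 walks the identity page table and yields guest-physical
   0x1000, which EPT in turn maps to the backing host-physical page. */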
3218 RTGCPHYS GCPhys;
3219 Assert(pVM->hm.s.vmx.pNonPagingModeEPTPageTable);
3220 Assert(PDMVmmDevHeapIsEnabled(pVM));
3221
3222 /* We obtain it here every time as the guest could have relocated this PCI region. */
3223 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pNonPagingModeEPTPageTable, &GCPhys);
3224 AssertRCReturn(rc, rc);
3225
3226 GCPhysGuestCR3 = GCPhys;
3227 }
3228
3229 Log4(("Load: VMX_VMCS_GUEST_CR3=%#RGv (GstN)\n", GCPhysGuestCR3));
3230 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_CR3, GCPhysGuestCR3);
3231 }
3232 else
3233 {
3234 /* Non-nested paging case, just use the hypervisor's CR3. */
3235 RTHCPHYS HCPhysGuestCR3 = PGMGetHyperCR3(pVCpu);
3236
3237 Log4(("Load: VMX_VMCS_GUEST_CR3=%#RHv (HstN)\n", HCPhysGuestCR3));
3238 rc = VMXWriteVmcsHstN(VMX_VMCS_GUEST_CR3, HCPhysGuestCR3);
3239 }
3240 AssertRCReturn(rc, rc);
3241
3242 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR3;
3243 }
3244
3245 /*
3246 * Guest CR4.
3247 */
3248 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR4)
3249 {
3250 Assert(!(pMixedCtx->cr4 >> 32));
3251 uint32_t u32GuestCR4 = pMixedCtx->cr4;
3252
3253 /* The guest's view of its CR4 is unblemished. */
3254 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, u32GuestCR4);
3255 AssertRCReturn(rc, rc);
3256 Log4(("Load: VMX_VMCS_CTRL_CR4_READ_SHADOW=%#RX32\n", u32GuestCR4));
3257
3258 /* Setup VT-x's view of the guest CR4. */
3259 /*
3260 * If we're emulating real-mode using virtual-8086 mode, we want to redirect software interrupts to the 8086 program
3261 * interrupt handler. Clear the VME bit (the interrupt redirection bitmap is already all 0, see hmR3InitFinalizeR0()).
3262 * See Intel spec. 20.2 "Software Interrupt Handling Methods While in Virtual-8086 Mode".
3263 */
3264 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3265 {
3266 Assert(pVM->hm.s.vmx.pRealModeTSS);
3267 Assert(PDMVmmDevHeapIsEnabled(pVM));
3268 u32GuestCR4 &= ~X86_CR4_VME;
3269 }
3270
3271 if (pVM->hm.s.fNestedPaging)
3272 {
3273 if ( !CPUMIsGuestPagingEnabledEx(pMixedCtx)
3274 && !pVM->hm.s.vmx.fUnrestrictedGuest)
3275 {
3276 /* We use 4 MB pages in our identity mapping page table when the guest doesn't have paging. */
3277 u32GuestCR4 |= X86_CR4_PSE;
3278 /* Our identity mapping is a 32-bit page directory. */
3279 u32GuestCR4 &= ~X86_CR4_PAE;
3280 }
3281 /* else use guest CR4.*/
3282 }
3283 else
3284 {
3285 /*
3286 * The shadow and guest paging modes can differ; the shadow is in accordance with the host paging mode,
3287 * so we need to adjust VT-x's view of CR4 according to our shadow page tables.
3288 */
3289 switch (pVCpu->hm.s.enmShadowMode)
3290 {
3291 case PGMMODE_REAL: /* Real-mode. */
3292 case PGMMODE_PROTECTED: /* Protected mode without paging. */
3293 case PGMMODE_32_BIT: /* 32-bit paging. */
3294 {
3295 u32GuestCR4 &= ~X86_CR4_PAE;
3296 break;
3297 }
3298
3299 case PGMMODE_PAE: /* PAE paging. */
3300 case PGMMODE_PAE_NX: /* PAE paging with NX. */
3301 {
3302 u32GuestCR4 |= X86_CR4_PAE;
3303 break;
3304 }
3305
3306 case PGMMODE_AMD64: /* 64-bit AMD paging (long mode). */
3307 case PGMMODE_AMD64_NX: /* 64-bit AMD paging (long mode) with NX enabled. */
3308#ifdef VBOX_ENABLE_64_BITS_GUESTS
3309 break;
3310#endif
3311 default:
3312 AssertFailed();
3313 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
3314 }
3315 }
3316
3317 /* We need to set and clear the CR4 specific bits here (mainly the X86_CR4_VMXE bit). */
3318 uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 & pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
3319 uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 | pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
3320 u32GuestCR4 |= uSetCR4;
3321 u32GuestCR4 &= uZapCR4;
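/* Note: CR4.VMXE is among the fixed0 bits while in VMX operation, so uSetCR4 force-sets it; the guest
   doesn't see it because reads of masked bits return the CR4 read shadow written above. */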
3322
3323 /* Write VT-x's view of the guest CR4 into the VMCS. */
3324 Log4(("Load: VMX_VMCS_GUEST_CR4=%#RX32 (Set=%#RX32 Zap=%#RX32)\n", u32GuestCR4, uSetCR4, uZapCR4));
3325 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_CR4, u32GuestCR4);
3326 AssertRCReturn(rc, rc);
3327
3328 /* Setup CR4 mask. CR4 flags owned by the host, if the guest attempts to change them, that would cause a VM exit. */
3329 uint32_t u32CR4Mask = 0;
3330 u32CR4Mask = X86_CR4_VME
3331 | X86_CR4_PAE
3332 | X86_CR4_PGE
3333 | X86_CR4_PSE
3334 | X86_CR4_VMXE;
3335 pVCpu->hm.s.vmx.u32CR4Mask = u32CR4Mask;
3336 rc = VMXWriteVmcs32(VMX_VMCS_CTRL_CR4_MASK, u32CR4Mask);
3337 AssertRCReturn(rc, rc);
3338
3339 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR4;
3340 }
3341 return rc;
3342}
3343
3344
3345/**
3346 * Loads the guest debug registers into the guest-state area in the VMCS.
3347 * This also sets up whether #DB and MOV DRx accesses cause VM exits.
3348 *
3349 * The guest debug bits are partially shared with the host (e.g. DR6, DR0-3).
3350 *
3351 * @returns VBox status code.
3352 * @param pVCpu Pointer to the VMCPU.
3353 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3354 * out-of-sync. Make sure to update the required fields
3355 * before using them.
3356 *
3357 * @remarks No-long-jump zone!!!
3358 */
3359static int hmR0VmxLoadSharedDebugState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3360{
3361 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG))
3362 return VINF_SUCCESS;
3363
3364#ifdef VBOX_STRICT
3365 /* Validate. Intel spec. 26.3.1.1 "Checks on Guest Controls Registers, Debug Registers, MSRs" */
3366 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
3367 {
3368 /* Validate. Intel spec. 17.2 "Debug Registers", recompiler paranoia checks. */
3369 Assert((pMixedCtx->dr[7] & (X86_DR7_MBZ_MASK | X86_DR7_RAZ_MASK)) == 0); /* Bits 63:32, 15, 14, 12, 11 are reserved. */
3370 Assert((pMixedCtx->dr[7] & X86_DR7_RA1_MASK) == X86_DR7_RA1_MASK); /* Bit 10 is reserved (RA1). */
3371 }
3372#endif
3373
3374 int rc;
3375 PVM pVM = pVCpu->CTX_SUFF(pVM);
3376 bool fInterceptDB = false;
3377 bool fInterceptMovDRx = false;
3378 if (pVCpu->hm.s.fSingleInstruction || DBGFIsStepping(pVCpu))
3379 {
3380 /* If the CPU supports the monitor trap flag, use it for single stepping in DBGF and avoid intercepting #DB. */
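/* The monitor trap flag causes a VM-exit after each guest instruction is executed, giving us
   single-stepping without modifying guest EFLAGS.TF. */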
3381 if (pVM->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG)
3382 {
3383 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
3384 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3385 AssertRCReturn(rc, rc);
3386 Assert(fInterceptDB == false);
3387 }
3388 else
3389 {
3390 pMixedCtx->eflags.u32 |= X86_EFL_TF;
3391 pVCpu->hm.s.fClearTrapFlag = true;
3392 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RFLAGS;
3393 fInterceptDB = true;
3394 }
3395 }
3396
3397 if (fInterceptDB || (CPUMGetHyperDR7(pVCpu) & X86_DR7_ENABLED_MASK))
3398 {
3399 /*
3400 * Use the combined guest and host DRx values found in the hypervisor
3401 * register set because the debugger has breakpoints active or someone
3402 * is single stepping on the host side without a monitor trap flag.
3403 *
3404 * Note! DBGF expects a clean DR6 state before executing guest code.
3405 */
3406 if (!CPUMIsHyperDebugStateActive(pVCpu))
3407 CPUMR0LoadHyperDebugState(pVCpu, true /* include DR6 */);
3408 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
3409 Assert(CPUMIsHyperDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
3410
3411 /* Update DR7. (The other DRx values are handled by CPUM one way or the other.) */
3412 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)CPUMGetHyperDR7(pVCpu));
3413 AssertRCReturn(rc, rc);
3414
3415 fInterceptDB = true;
3416 fInterceptMovDRx = true;
3417 }
3418 else
3419 {
3420 /*
3421 * If the guest has enabled debug registers, we need to load them prior to
3422 * executing guest code so they'll trigger at the right time.
3423 */
3424 if (pMixedCtx->dr[7] & (X86_DR7_ENABLED_MASK | X86_DR7_GD)) /** @todo Why GD? */
3425 {
3426 if (!CPUMIsGuestDebugStateActive(pVCpu))
3427 {
3428 CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
3429 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxArmed);
3430 }
3431 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
3432 Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);
3433 }
3434 /*
3435 * If no debugging is enabled, we'll lazily load DR0-3. Unlike on AMD-V, we
3436 * must intercept #DB in order to maintain a correct DR6 guest value.
3437 */
3438 else if (!CPUMIsGuestDebugStateActive(pVCpu))
3439 {
3440 fInterceptMovDRx = true;
3441 fInterceptDB = true;
3442 }
3443
3444 rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, pMixedCtx->dr[7]);
3445 AssertRCReturn(rc, rc);
3446 }
3447
3448 /*
3449 * Update the exception bitmap regarding intercepting #DB generated by the guest.
3450 */
3451 if (fInterceptDB)
3452 pVCpu->hm.s.vmx.u32XcptBitmap |= RT_BIT(X86_XCPT_DB);
3453 else if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3454 {
3455#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
3456 pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
3457#endif
3458 }
3459 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
3460 AssertRCReturn(rc, rc);
3461
3462 /*
3463 * Update the processor-based VM-execution controls regarding intercepting MOV DRx instructions.
3464 */
3465 if (fInterceptMovDRx)
3466 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
3467 else
3468 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
3469 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
3470 AssertRCReturn(rc, rc);
3471
3472 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_DEBUG;
3473 return VINF_SUCCESS;
3474}
3475
3476
3477#ifdef VBOX_STRICT
3478/**
3479 * Strict function to validate segment registers.
3480 *
3481 * @remarks ASSUMES CR0 is up to date.
3482 */
3483static void hmR0VmxValidateSegmentRegs(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
3484{
3485 /* Validate segment registers. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers". */
3486 /* NOTE: The reason we check for attribute value 0 and not just the unusable bit here is that hmR0VmxWriteSegmentReg()
3487 * only updates the VMCS' copy of the value with the unusable bit set and doesn't change the guest-context value. */
3488 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
3489 && ( !CPUMIsGuestInRealModeEx(pCtx)
3490 && !CPUMIsGuestInV86ModeEx(pCtx)))
3491 {
3492 /* Protected mode checks */
3493 /* CS */
3494 Assert(pCtx->cs.Attr.n.u1Present);
3495 Assert(!(pCtx->cs.Attr.u & 0xf00));
3496 Assert(!(pCtx->cs.Attr.u & 0xfffe0000));
3497 Assert( (pCtx->cs.u32Limit & 0xfff) == 0xfff
3498 || !(pCtx->cs.Attr.n.u1Granularity));
3499 Assert( !(pCtx->cs.u32Limit & 0xfff00000)
3500 || (pCtx->cs.Attr.n.u1Granularity));
3501 /* CS cannot be loaded with NULL in protected mode. */
3502 Assert(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE)); /** @todo is this really true even for 64-bit CS?!? */
3503 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
3504 Assert(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl);
3505 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
3506 Assert(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl);
3507 else
3508 AssertMsgFailed(("Invalid CS Type %#x\n", pCtx->cs.Attr.n.u2Dpl));
3509 /* SS */
3510 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3511 Assert(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL));
3512 if ( !(pCtx->cr0 & X86_CR0_PE)
3513 || pCtx->cs.Attr.n.u4Type == 3)
3514 {
3515 Assert(!pCtx->ss.Attr.n.u2Dpl);
3516 }
3517 if (pCtx->ss.Attr.u && !(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
3518 {
3519 Assert((pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL));
3520 Assert(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7);
3521 Assert(pCtx->ss.Attr.n.u1Present);
3522 Assert(!(pCtx->ss.Attr.u & 0xf00));
3523 Assert(!(pCtx->ss.Attr.u & 0xfffe0000));
3524 Assert( (pCtx->ss.u32Limit & 0xfff) == 0xfff
3525 || !(pCtx->ss.Attr.n.u1Granularity));
3526 Assert( !(pCtx->ss.u32Limit & 0xfff00000)
3527 || (pCtx->ss.Attr.n.u1Granularity));
3528 }
3529 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
3530 if (pCtx->ds.Attr.u && !(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
3531 {
3532 Assert(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3533 Assert(pCtx->ds.Attr.n.u1Present);
3534 Assert(pCtx->ds.Attr.n.u4Type > 11 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL));
3535 Assert(!(pCtx->ds.Attr.u & 0xf00));
3536 Assert(!(pCtx->ds.Attr.u & 0xfffe0000));
3537 Assert( (pCtx->ds.u32Limit & 0xfff) == 0xfff
3538 || !(pCtx->ds.Attr.n.u1Granularity));
3539 Assert( !(pCtx->ds.u32Limit & 0xfff00000)
3540 || (pCtx->ds.Attr.n.u1Granularity));
3541 Assert( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3542 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ));
3543 }
3544 if (pCtx->es.Attr.u && !(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
3545 {
3546 Assert(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3547 Assert(pCtx->es.Attr.n.u1Present);
3548 Assert(pCtx->es.Attr.n.u4Type > 11 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL));
3549 Assert(!(pCtx->es.Attr.u & 0xf00));
3550 Assert(!(pCtx->es.Attr.u & 0xfffe0000));
3551 Assert( (pCtx->es.u32Limit & 0xfff) == 0xfff
3552 || !(pCtx->es.Attr.n.u1Granularity));
3553 Assert( !(pCtx->es.u32Limit & 0xfff00000)
3554 || (pCtx->es.Attr.n.u1Granularity));
3555 Assert( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3556 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ));
3557 }
3558 if (pCtx->fs.Attr.u && !(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
3559 {
3560 Assert(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3561 Assert(pCtx->fs.Attr.n.u1Present);
3562 Assert(pCtx->fs.Attr.n.u4Type > 11 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL));
3563 Assert(!(pCtx->fs.Attr.u & 0xf00));
3564 Assert(!(pCtx->fs.Attr.u & 0xfffe0000));
3565 Assert( (pCtx->fs.u32Limit & 0xfff) == 0xfff
3566 || !(pCtx->fs.Attr.n.u1Granularity));
3567 Assert( !(pCtx->fs.u32Limit & 0xfff00000)
3568 || (pCtx->fs.Attr.n.u1Granularity));
3569 Assert( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3570 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3571 }
3572 if (pCtx->gs.Attr.u && !(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
3573 {
3574 Assert(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED);
3575 Assert(pCtx->gs.Attr.n.u1Present);
3576 Assert(pCtx->gs.Attr.n.u4Type > 11 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL));
3577 Assert(!(pCtx->gs.Attr.u & 0xf00));
3578 Assert(!(pCtx->gs.Attr.u & 0xfffe0000));
3579 Assert( (pCtx->gs.u32Limit & 0xfff) == 0xfff
3580 || !(pCtx->gs.Attr.n.u1Granularity));
3581 Assert( !(pCtx->gs.u32Limit & 0xfff00000)
3582 || (pCtx->gs.Attr.n.u1Granularity));
3583 Assert( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
3584 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ));
3585 }
3586 /* 64-bit capable CPUs. */
3587# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3588 Assert(!(pCtx->cs.u64Base >> 32));
3589 Assert(!pCtx->ss.Attr.u || !(pCtx->ss.u64Base >> 32));
3590 Assert(!pCtx->ds.Attr.u || !(pCtx->ds.u64Base >> 32));
3591 Assert(!pCtx->es.Attr.u || !(pCtx->es.u64Base >> 32));
3592# endif
3593 }
3594 else if ( CPUMIsGuestInV86ModeEx(pCtx)
3595 || ( CPUMIsGuestInRealModeEx(pCtx)
3596 && !pVM->hm.s.vmx.fUnrestrictedGuest))
3597 {
3598 /* Real and v86 mode checks. */
3599 /* hmR0VmxWriteSegmentReg() writes the modified attributes into the VMCS; we want to validate what we're actually feeding to VT-x. */
3600 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
3601 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3602 {
3603 u32CSAttr = 0xf3; u32SSAttr = 0xf3; u32DSAttr = 0xf3; u32ESAttr = 0xf3; u32FSAttr = 0xf3; u32GSAttr = 0xf3;
3604 }
3605 else
3606 {
3607 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u; u32DSAttr = pCtx->ds.Attr.u;
3608 u32ESAttr = pCtx->es.Attr.u; u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
3609 }
3610
3611 /* CS */
3612 AssertMsg((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), ("CS base %#RX64 %#x\n", pCtx->cs.u64Base, pCtx->cs.Sel));
3613 Assert(pCtx->cs.u32Limit == 0xffff);
3614 Assert(u32CSAttr == 0xf3);
3615 /* SS */
3616 Assert(pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4);
3617 Assert(pCtx->ss.u32Limit == 0xffff);
3618 Assert(u32SSAttr == 0xf3);
3619 /* DS */
3620 Assert(pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4);
3621 Assert(pCtx->ds.u32Limit == 0xffff);
3622 Assert(u32DSAttr == 0xf3);
3623 /* ES */
3624 Assert(pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4);
3625 Assert(pCtx->es.u32Limit == 0xffff);
3626 Assert(u32ESAttr == 0xf3);
3627 /* FS */
3628 Assert(pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4);
3629 Assert(pCtx->fs.u32Limit == 0xffff);
3630 Assert(u32FSAttr == 0xf3);
3631 /* GS */
3632 Assert(pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4);
3633 Assert(pCtx->gs.u32Limit == 0xffff);
3634 Assert(u32GSAttr == 0xf3);
3635 /* 64-bit capable CPUs. */
3636# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
3637 Assert(!(pCtx->cs.u64Base >> 32));
3638 Assert(!u32SSAttr || !(pCtx->ss.u64Base >> 32));
3639 Assert(!u32DSAttr || !(pCtx->ds.u64Base >> 32));
3640 Assert(!u32ESAttr || !(pCtx->es.u64Base >> 32));
3641# endif
3642 }
3643}
3644#endif /* VBOX_STRICT */
3645
3646
3647/**
3648 * Writes a guest segment register into the guest-state area in the VMCS.
3649 *
3650 * @returns VBox status code.
3651 * @param pVCpu Pointer to the VMCPU.
3652 * @param idxSel Index of the selector in the VMCS.
3653 * @param idxLimit Index of the segment limit in the VMCS.
3654 * @param idxBase Index of the segment base in the VMCS.
3655 * @param idxAccess Index of the access rights of the segment in the VMCS.
3656 * @param pSelReg Pointer to the segment selector.
3657 * @param pCtx Pointer to the guest-CPU context.
3658 *
3659 * @remarks No-long-jump zone!!!
3660 */
3661static int hmR0VmxWriteSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase,
3662 uint32_t idxAccess, PCPUMSELREG pSelReg, PCPUMCTX pCtx)
3663{
3664 int rc = VMXWriteVmcs32(idxSel, pSelReg->Sel); /* 16-bit guest selector field. */
3665 AssertRCReturn(rc, rc);
3666 rc = VMXWriteVmcs32(idxLimit, pSelReg->u32Limit); /* 32-bit guest segment limit field. */
3667 AssertRCReturn(rc, rc);
3668 rc = VMXWriteVmcsGstN(idxBase, pSelReg->u64Base); /* Natural width guest segment base field.*/
3669 AssertRCReturn(rc, rc);
3670
3671 uint32_t u32Access = pSelReg->Attr.u;
3672 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3673 {
3674 /* VT-x requires our real-using-v86 mode hack to override the segment access-right bits. */
3675 u32Access = 0xf3;
3676 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
3677 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
3678 }
3679 else
3680 {
3681 /*
3682 * The way to differentiate between whether this is really a null selector or just a selector loaded with 0 in
3683 * real-mode is by using the segment attributes: a selector loaded in real-mode with the value 0 is valid and usable
3684 * in protected-mode, so we should -not- mark it as an unusable segment. Both the recompiler & VT-x ensure that NULL
3685 * selectors loaded in protected-mode have their attributes set to 0.
3686 */
3687 if (!u32Access)
3688 u32Access = X86DESCATTR_UNUSABLE;
3689 }
3690
3691 /* Validate segment access rights. Refer to Intel spec. "26.3.1.2 Checks on Guest Segment Registers". */
3692 AssertMsg((u32Access & X86DESCATTR_UNUSABLE) || (u32Access & X86_SEL_TYPE_ACCESSED),
3693 ("Access bit not set for usable segment. idx=%#x sel=%#x attr %#x\n", idxBase, pSelReg, pSelReg->Attr.u));
3694
3695 rc = VMXWriteVmcs32(idxAccess, u32Access); /* 32-bit guest segment access-rights field. */
3696 AssertRCReturn(rc, rc);
3697 return rc;
3698}
3699
3700
3701/**
3702 * Loads the guest segment registers, GDTR, IDTR, LDTR, (TR, FS and GS bases)
3703 * into the guest-state area in the VMCS.
3704 *
3705 * @returns VBox status code.
3706 * @param pVM Pointer to the VM.
3707 * @param pVCpu Pointer to the VMCPU.
3708 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3709 * out-of-sync. Make sure to update the required fields
3710 * before using them.
3711 *
3712 * @remarks ASSUMES CR0 is up to date (strict builds validation).
3713 * @remarks No-long-jump zone!!!
3714 */
3715static int hmR0VmxLoadGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3716{
3717 int rc = VERR_INTERNAL_ERROR_5;
3718 PVM pVM = pVCpu->CTX_SUFF(pVM);
3719
3720 /*
3721 * Guest Segment registers: CS, SS, DS, ES, FS, GS.
3722 */
3723 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SEGMENT_REGS)
3724 {
3725 /* Save the segment attributes for real-on-v86 mode hack, so we can restore them on VM-exit. */
3726 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3727 {
3728 pVCpu->hm.s.vmx.RealMode.AttrCS.u = pMixedCtx->cs.Attr.u;
3729 pVCpu->hm.s.vmx.RealMode.AttrSS.u = pMixedCtx->ss.Attr.u;
3730 pVCpu->hm.s.vmx.RealMode.AttrDS.u = pMixedCtx->ds.Attr.u;
3731 pVCpu->hm.s.vmx.RealMode.AttrES.u = pMixedCtx->es.Attr.u;
3732 pVCpu->hm.s.vmx.RealMode.AttrFS.u = pMixedCtx->fs.Attr.u;
3733 pVCpu->hm.s.vmx.RealMode.AttrGS.u = pMixedCtx->gs.Attr.u;
3734 }
3735
3736#ifdef VBOX_WITH_REM
3737 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
3738 {
3739 Assert(pVM->hm.s.vmx.pRealModeTSS);
3740 AssertCompile(PGMMODE_REAL < PGMMODE_PROTECTED);
3741 if ( pVCpu->hm.s.vmx.fWasInRealMode
3742 && PGMGetGuestMode(pVCpu) >= PGMMODE_PROTECTED)
3743 {
3744 /* Signal that the recompiler must flush its code-cache as the guest -may- rewrite code it will later execute
3745 in real-mode (e.g. OpenBSD 4.0) */
3746 REMFlushTBs(pVM);
3747 Log4(("Load: Switch to protected mode detected!\n"));
3748 pVCpu->hm.s.vmx.fWasInRealMode = false;
3749 }
3750 }
3751#endif
3752 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS, VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
3753 VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs, pMixedCtx);
3754 AssertRCReturn(rc, rc);
3755 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_SS, VMX_VMCS32_GUEST_SS_LIMIT, VMX_VMCS_GUEST_SS_BASE,
3756 VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS, &pMixedCtx->ss, pMixedCtx);
3757 AssertRCReturn(rc, rc);
3758 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_DS, VMX_VMCS32_GUEST_DS_LIMIT, VMX_VMCS_GUEST_DS_BASE,
3759 VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS, &pMixedCtx->ds, pMixedCtx);
3760 AssertRCReturn(rc, rc);
3761 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_ES, VMX_VMCS32_GUEST_ES_LIMIT, VMX_VMCS_GUEST_ES_BASE,
3762 VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS, &pMixedCtx->es, pMixedCtx);
3763 AssertRCReturn(rc, rc);
3764 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_FS, VMX_VMCS32_GUEST_FS_LIMIT, VMX_VMCS_GUEST_FS_BASE,
3765 VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS, &pMixedCtx->fs, pMixedCtx);
3766 AssertRCReturn(rc, rc);
3767 rc = hmR0VmxWriteSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_GS, VMX_VMCS32_GUEST_GS_LIMIT, VMX_VMCS_GUEST_GS_BASE,
3768 VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS, &pMixedCtx->gs, pMixedCtx);
3769 AssertRCReturn(rc, rc);
3770
3771 Log4(("Load: CS=%#RX16 Base=%#RX64 Limit=%#RX32 Attr=%#RX32\n", pMixedCtx->cs.Sel, pMixedCtx->cs.u64Base,
3772 pMixedCtx->cs.u32Limit, pMixedCtx->cs.Attr.u));
3773#ifdef VBOX_STRICT
3774 hmR0VmxValidateSegmentRegs(pVM, pVCpu, pMixedCtx);
3775#endif
3776 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SEGMENT_REGS;
3777 }
3778
3779 /*
3780 * Guest TR.
3781 */
3782 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_TR)
3783 {
3784 /*
3785 * Real-mode emulation using virtual-8086 mode with CR4.VME. Interrupt redirection is achieved
3786 * using the interrupt redirection bitmap (all bits cleared to let the guest handle INT-n's) in the TSS.
3787 * See hmR3InitFinalizeR0() to see how pRealModeTSS is setup.
3788 */
3789 uint16_t u16Sel = 0;
3790 uint32_t u32Limit = 0;
3791 uint64_t u64Base = 0;
3792 uint32_t u32AccessRights = 0;
3793
3794 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
3795 {
3796 u16Sel = pMixedCtx->tr.Sel;
3797 u32Limit = pMixedCtx->tr.u32Limit;
3798 u64Base = pMixedCtx->tr.u64Base;
3799 u32AccessRights = pMixedCtx->tr.Attr.u;
3800 }
3801 else
3802 {
3803 Assert(pVM->hm.s.vmx.pRealModeTSS);
3804 Assert(PDMVmmDevHeapIsEnabled(pVM)); /* Guaranteed by HMR3CanExecuteGuest() -XXX- what about inner loop changes? */
3805
3806 /* We obtain it here every time as PCI regions could be reconfigured in the guest, changing the VMMDev base. */
3807 RTGCPHYS GCPhys;
3808 rc = PDMVmmDevHeapR3ToGCPhys(pVM, pVM->hm.s.vmx.pRealModeTSS, &GCPhys);
3809 AssertRCReturn(rc, rc);
3810
3811 X86DESCATTR DescAttr;
3812 DescAttr.u = 0;
3813 DescAttr.n.u1Present = 1;
3814 DescAttr.n.u4Type = X86_SEL_TYPE_SYS_386_TSS_BUSY;
3815
3816 u16Sel = 0;
3817 u32Limit = HM_VTX_TSS_SIZE;
3818 u64Base = GCPhys; /* in real-mode phys = virt. */
3819 u32AccessRights = DescAttr.u;
3820 }
3821
3822 /* Validate. */
3823 Assert(!(u16Sel & RT_BIT(2)));
3824 AssertMsg( (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_386_TSS_BUSY
3825 || (u32AccessRights & 0xf) == X86_SEL_TYPE_SYS_286_TSS_BUSY, ("TSS is not busy!? %#x\n", u32AccessRights));
3826 AssertMsg(!(u32AccessRights & X86DESCATTR_UNUSABLE), ("TR unusable bit is not clear!? %#x\n", u32AccessRights));
3827 Assert(!(u32AccessRights & RT_BIT(4))); /* System MBZ.*/
3828 Assert(u32AccessRights & RT_BIT(7)); /* Present MB1.*/
3829 Assert(!(u32AccessRights & 0xf00)); /* 11:8 MBZ. */
3830 Assert(!(u32AccessRights & 0xfffe0000)); /* 31:17 MBZ. */
3831 Assert( (u32Limit & 0xfff) == 0xfff
3832 || !(u32AccessRights & RT_BIT(15))); /* Granularity MBZ. */
3833 Assert( !(pMixedCtx->tr.u32Limit & 0xfff00000)
3834 || (u32AccessRights & RT_BIT(15))); /* Granularity MB1. */
3835
3836 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_TR, u16Sel); AssertRCReturn(rc, rc);
3837 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_LIMIT, u32Limit); AssertRCReturn(rc, rc);
3838 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_TR_BASE, u64Base); AssertRCReturn(rc, rc);
3839 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS, u32AccessRights); AssertRCReturn(rc, rc);
3840
3841 Log4(("Load: VMX_VMCS_GUEST_TR_BASE=%#RX64\n", u64Base));
3842 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_TR;
3843 }
3844
3845 /*
3846 * Guest GDTR.
3847 */
3848 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_GDTR)
3849 {
3850 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, pMixedCtx->gdtr.cbGdt); AssertRCReturn(rc, rc);
3851 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, pMixedCtx->gdtr.pGdt); AssertRCReturn(rc, rc);
3852
3853 Assert(!(pMixedCtx->gdtr.cbGdt & 0xffff0000)); /* Bits 31:16 MBZ. */
3854 Log4(("Load: VMX_VMCS_GUEST_GDTR_BASE=%#RX64\n", pMixedCtx->gdtr.pGdt));
3855 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_GDTR;
3856 }
3857
3858 /*
3859 * Guest LDTR.
3860 */
3861 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_LDTR)
3862 {
3863 /* The unusable bit is specific to VT-x; if it's a null selector, mark it as an unusable segment. */
3864 uint32_t u32Access = 0;
3865 if (!pMixedCtx->ldtr.Attr.u)
3866 u32Access = X86DESCATTR_UNUSABLE;
3867 else
3868 u32Access = pMixedCtx->ldtr.Attr.u;
3869
3870 rc = VMXWriteVmcs32(VMX_VMCS16_GUEST_FIELD_LDTR, pMixedCtx->ldtr.Sel); AssertRCReturn(rc, rc);
3871 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_LIMIT, pMixedCtx->ldtr.u32Limit); AssertRCReturn(rc, rc);
3872 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_LDTR_BASE, pMixedCtx->ldtr.u64Base); AssertRCReturn(rc, rc);
3873 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS, u32Access); AssertRCReturn(rc, rc);
3874
3875 /* Validate. */
3876 if (!(u32Access & X86DESCATTR_UNUSABLE))
3877 {
3878 Assert(!(pMixedCtx->ldtr.Sel & RT_BIT(2))); /* TI MBZ. */
3879 Assert(pMixedCtx->ldtr.Attr.n.u4Type == 2); /* Type MB2 (LDT). */
3880 Assert(!pMixedCtx->ldtr.Attr.n.u1DescType); /* System MBZ. */
3881 Assert(pMixedCtx->ldtr.Attr.n.u1Present == 1); /* Present MB1. */
3882 Assert(!pMixedCtx->ldtr.Attr.n.u4LimitHigh); /* 11:8 MBZ. */
3883 Assert(!(pMixedCtx->ldtr.Attr.u & 0xfffe0000)); /* 31:17 MBZ. */
3884 Assert( (pMixedCtx->ldtr.u32Limit & 0xfff) == 0xfff
3885 || !pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MBZ. */
3886 Assert( !(pMixedCtx->ldtr.u32Limit & 0xfff00000)
3887 || pMixedCtx->ldtr.Attr.n.u1Granularity); /* Granularity MB1. */
3888 }
3889
3890 Log4(("Load: VMX_VMCS_GUEST_LDTR_BASE=%#RX64\n", pMixedCtx->ldtr.u64Base));
3891 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_LDTR;
3892 }
3893
3894 /*
3895 * Guest IDTR.
3896 */
3897 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_IDTR)
3898 {
3899 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, pMixedCtx->idtr.cbIdt); AssertRCReturn(rc, rc);
3900 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, pMixedCtx->idtr.pIdt); AssertRCReturn(rc, rc);
3901
3902 Assert(!(pMixedCtx->idtr.cbIdt & 0xffff0000)); /* Bits 31:16 MBZ. */
3903 Log4(("Load: VMX_VMCS_GUEST_IDTR_BASE=%#RX64\n", pMixedCtx->idtr.pIdt));
3904 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_IDTR;
3905 }
3906
3907 return VINF_SUCCESS;
3908}
3909
3910
3911/**
3912 * Loads certain guest MSRs into the VM-entry MSR-load and VM-exit MSR-store
3913 * areas. These MSRs will automatically be loaded to the host CPU on every
3914 * successful VM entry and stored from the host CPU on every successful VM exit.
3915 * Also loads the sysenter MSRs into the guest-state area in the VMCS.
3916 *
3917 * @returns VBox status code.
3918 * @param pVCpu Pointer to the VMCPU.
3919 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
3920 * out-of-sync. Make sure to update the required fields
3921 * before using them.
3922 *
3923 * @remarks No-long-jump zone!!!
3924 */
3925static int hmR0VmxLoadGuestMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
3926{
3927 AssertPtr(pVCpu);
3928 AssertPtr(pVCpu->hm.s.vmx.pvGuestMsr);
3929
3930 /*
3931 * MSRs covered by auto-load/store: LSTAR, STAR, SF_MASK, KERNEL_GS_BASE and TSC_AUX (RDTSCP).
3932 */
3933 int rc = VINF_SUCCESS;
3934 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_AUTO_MSRS)
3935 {
3936#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
3937 PVM pVM = pVCpu->CTX_SUFF(pVM);
3938 PVMXMSR pGuestMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
3939 uint32_t cGuestMsrs = 0;
3940
3941 /* See Intel spec. 4.1.4 "Enumeration of Paging Features by CPUID". */
3942 /** @todo r=ramshankar: Optimize this further to do lazy restoration and only
3943 * when the guest really is in 64-bit mode. */
3944 bool fSupportsLongMode = CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_LONG_MODE);
3945 if (fSupportsLongMode)
3946 {
3947 pGuestMsr->u32IndexMSR = MSR_K8_LSTAR;
3948 pGuestMsr->u32Reserved = 0;
3949 pGuestMsr->u64Value = pMixedCtx->msrLSTAR; /* 64 bits mode syscall rip */
3950 pGuestMsr++; cGuestMsrs++;
3951 pGuestMsr->u32IndexMSR = MSR_K6_STAR;
3952 pGuestMsr->u32Reserved = 0;
3953 pGuestMsr->u64Value = pMixedCtx->msrSTAR; /* legacy syscall eip, cs & ss */
3954 pGuestMsr++; cGuestMsrs++;
3955 pGuestMsr->u32IndexMSR = MSR_K8_SF_MASK;
3956 pGuestMsr->u32Reserved = 0;
3957 pGuestMsr->u64Value = pMixedCtx->msrSFMASK; /* syscall flag mask */
3958 pGuestMsr++; cGuestMsrs++;
3959 pGuestMsr->u32IndexMSR = MSR_K8_KERNEL_GS_BASE;
3960 pGuestMsr->u32Reserved = 0;
3961 pGuestMsr->u64Value = pMixedCtx->msrKERNELGSBASE; /* swapgs exchange value */
3962 pGuestMsr++; cGuestMsrs++;
3963 }
3964
3965 /*
3966 * RDTSCP requires the TSC_AUX MSR. Host and guest share the physical MSR. So we have to
3967 * load the guest's copy if the guest can execute RDTSCP without causing VM-exits.
3968 */
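/* Note: the same count is written to both the VM-entry MSR-load and VM-exit MSR-store counts below,
   so these MSRs are loaded with guest values on every VM-entry and saved back on every VM-exit. */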
3969 if ( CPUMGetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_RDTSCP)
3970 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP))
3971 {
3972 pGuestMsr->u32IndexMSR = MSR_K8_TSC_AUX;
3973 pGuestMsr->u32Reserved = 0;
3974 rc = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &pGuestMsr->u64Value);
3975 AssertRCReturn(rc, rc);
3976 pGuestMsr++; cGuestMsrs++;
3977 }
3978
3979 /* Shouldn't ever happen, but there -is- a limit. We're well within the recommended 512. */
3980 if (cGuestMsrs > MSR_IA32_VMX_MISC_MAX_MSR(pVM->hm.s.vmx.msr.vmx_misc))
3981 {
3982 LogRel(("CPU autoload/store MSR count in VMCS exceeded cGuestMsrs=%u.\n", cGuestMsrs));
3983 pVCpu->hm.s.u32HMError = VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE;
3984 return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;
3985 }
3986
3987 /* Update the VCPU's copy of the guest MSR count. */
3988 pVCpu->hm.s.vmx.cGuestMsrs = cGuestMsrs;
3989 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, cGuestMsrs); AssertRCReturn(rc, rc);
3990 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, cGuestMsrs); AssertRCReturn(rc, rc);
3991#endif /* VBOX_WITH_AUTO_MSR_LOAD_RESTORE */
3992
3993 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_AUTO_MSRS;
3994 }
3995
3996 /*
3997 * Guest Sysenter MSRs.
3998 * These flags are only set when the CPU doesn't support MSR-bitmaps, in which case WRMSR
3999 * to these MSRs causes VM-exits.
4000 */
4001 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_CS_MSR)
4002 {
4003 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, pMixedCtx->SysEnter.cs); AssertRCReturn(rc, rc);
4004 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_CS_MSR;
4005 }
4006 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_EIP_MSR)
4007 {
4008 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, pMixedCtx->SysEnter.eip); AssertRCReturn(rc, rc);
4009 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_EIP_MSR;
4010 }
4011 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_SYSENTER_ESP_MSR)
4012 {
4013 rc = VMXWriteVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, pMixedCtx->SysEnter.esp); AssertRCReturn(rc, rc);
4014 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_SYSENTER_ESP_MSR;
4015 }
4016
4017 return rc;
4018}
4019
4020
4021/**
4022 * Loads the guest activity state into the guest-state area in the VMCS.
4023 *
4024 * @returns VBox status code.
4025 * @param pVCpu Pointer to the VMCPU.
4026 * @param pCtx Pointer to the guest-CPU context. The data may be
4027 * out-of-sync. Make sure to update the required fields
4028 * before using them.
4029 *
4030 * @remarks No-long-jump zone!!!
4031 */
4032static int hmR0VmxLoadGuestActivityState(PVMCPU pVCpu, PCPUMCTX pCtx)
4033{
4034 /** @todo See if we can make use of other states, e.g.
4035 * VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN or HLT. */
4036 int rc = VINF_SUCCESS;
4037 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_VMX_GUEST_ACTIVITY_STATE)
4038 {
4039 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, VMX_VMCS_GUEST_ACTIVITY_ACTIVE);
4040 AssertRCReturn(rc, rc);
4041 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_VMX_GUEST_ACTIVITY_STATE;
4042 }
4043 return rc;
4044}
4045
4046
4047/**
4048 * Sets up the appropriate function to run guest code.
4049 *
4050 * @returns VBox status code.
4051 * @param pVCpu Pointer to the VMCPU.
4052 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4053 * out-of-sync. Make sure to update the required fields
4054 * before using them.
4055 *
4056 * @remarks No-long-jump zone!!!
4057 */
4058static int hmR0VmxSetupVMRunHandler(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4059{
4060 if (CPUMIsGuestInLongModeEx(pMixedCtx))
4061 {
4062#ifndef VBOX_ENABLE_64_BITS_GUESTS
4063 return VERR_PGM_UNSUPPORTED_SHADOW_PAGING_MODE;
4064#endif
4065 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests); /* Guaranteed by hmR3InitFinalizeR0(). */
4066#if HC_ARCH_BITS == 32 && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4067 /* 32-bit host. We need to switch to 64-bit before running the 64-bit guest. */
4068 pVCpu->hm.s.vmx.pfnStartVM = VMXR0SwitcherStartVM64;
4069#else
4070 /* 64-bit host or hybrid host. */
4071 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM64;
4072#endif
4073 }
4074 else
4075 {
4076 /* Guest is not in long mode, use the 32-bit handler. */
4077 pVCpu->hm.s.vmx.pfnStartVM = VMXR0StartVM32;
4078 }
4079 Assert(pVCpu->hm.s.vmx.pfnStartVM);
4080 return VINF_SUCCESS;
4081}
4082
4083
4084/**
4085 * Wrapper for running the guest code in VT-x.
4086 *
4087 * @returns VBox strict status code.
4088 * @param pVM Pointer to the VM.
4089 * @param pVCpu Pointer to the VMCPU.
4090 * @param pCtx Pointer to the guest-CPU context.
4091 *
4092 * @remarks No-long-jump zone!!!
4093 */
4094DECLINLINE(int) hmR0VmxRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
4095{
4096 /*
4097 * 64-bit Windows uses XMM registers in the kernel as the Microsoft compiler expresses floating-point operations
4098 * using SSE instructions. Some XMM registers (XMM6-XMM15) are callee-saved, hence the need for this XMM wrapper.
4099 * Refer to the MSDN docs, "Configuring Programs for 64-bit / x64 Software Conventions / Register Usage", for details.
4100 */
4101 const bool fResumeVM = !!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED);
4102 /** @todo Add stats for resume vs launch. */
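/* A VMCS in the "clear" launch state must be started with VMLAUNCH; once launched, it must be resumed
   with VMRESUME. HMVMX_VMCS_STATE_LAUNCHED tracks which of the two applies. */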
4103#ifdef VBOX_WITH_KERNEL_USING_XMM
4104 return HMR0VMXStartVMWrapXMM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu, pVCpu->hm.s.vmx.pfnStartVM);
4105#else
4106 return pVCpu->hm.s.vmx.pfnStartVM(fResumeVM, pCtx, &pVCpu->hm.s.vmx.VMCSCache, pVM, pVCpu);
4107#endif
4108}
4109
4110
4111/**
4112 * Reports world-switch error and dumps some useful debug info.
4113 *
4114 * @param pVM Pointer to the VM.
4115 * @param pVCpu Pointer to the VMCPU.
4116 * @param rcVMRun The return code from VMLAUNCH/VMRESUME.
4117 * @param pCtx Pointer to the guest-CPU context.
4118 * @param pVmxTransient Pointer to the VMX transient structure (only
4119 * exitReason updated).
4120 */
4121static void hmR0VmxReportWorldSwitchError(PVM pVM, PVMCPU pVCpu, int rcVMRun, PCPUMCTX pCtx, PVMXTRANSIENT pVmxTransient)
4122{
4123 Assert(pVM);
4124 Assert(pVCpu);
4125 Assert(pCtx);
4126 Assert(pVmxTransient);
4127 HMVMX_ASSERT_PREEMPT_SAFE();
4128
4129 Log4(("VM-entry failure: %Rrc\n", rcVMRun));
4130 switch (rcVMRun)
4131 {
4132 case VERR_VMX_INVALID_VMXON_PTR:
4133 AssertFailed();
4134 break;
4135 case VINF_SUCCESS: /* VMLAUNCH/VMRESUME succeeded but VM-entry failed... yeah, true story. */
4136 case VERR_VMX_UNABLE_TO_START_VM: /* VMLAUNCH/VMRESUME itself failed. */
4137 {
4138 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &pVCpu->hm.s.vmx.LastError.u32ExitReason);
4139 rc |= VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &pVCpu->hm.s.vmx.LastError.u32InstrError);
4140 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
4141 AssertRC(rc);
4142
4143 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
4144 /* LastError.idCurrentCpu was already updated in hmR0VmxPreRunGuestCommitted().
4145 Cannot do it here as we may have been long preempted. */
4146
4147#ifdef VBOX_STRICT
4148 Log4(("uExitReason %#RX32 (VmxTransient %#RX16)\n", pVCpu->hm.s.vmx.LastError.u32ExitReason,
4149 pVmxTransient->uExitReason));
4150 Log4(("Exit Qualification %#RX64\n", pVmxTransient->uExitQualification));
4151 Log4(("InstrError %#RX32\n", pVCpu->hm.s.vmx.LastError.u32InstrError));
4152 if (pVCpu->hm.s.vmx.LastError.u32InstrError <= HMVMX_INSTR_ERROR_MAX)
4153 Log4(("InstrError Desc. \"%s\"\n", g_apszVmxInstrErrors[pVCpu->hm.s.vmx.LastError.u32InstrError]));
4154 else
4155 Log4(("InstrError Desc. Range exceeded %u\n", HMVMX_INSTR_ERROR_MAX));
4156 Log4(("Entered host CPU %u\n", pVCpu->hm.s.vmx.LastError.idEnteredCpu));
4157 Log4(("Current host CPU %u\n", pVCpu->hm.s.vmx.LastError.idCurrentCpu));
4158
4159 /* VMX control bits. */
4160 uint32_t u32Val;
4161 uint64_t u64Val;
4162 HMVMXHCUINTREG uHCReg;
4163 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PIN_EXEC, &u32Val); AssertRC(rc);
4164 Log4(("VMX_VMCS32_CTRL_PIN_EXEC %#RX32\n", u32Val));
4165 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val); AssertRC(rc);
4166 Log4(("VMX_VMCS32_CTRL_PROC_EXEC %#RX32\n", u32Val));
4167 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC2, &u32Val); AssertRC(rc);
4168 Log4(("VMX_VMCS32_CTRL_PROC_EXEC2 %#RX32\n", u32Val));
4169 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY, &u32Val); AssertRC(rc);
4170 Log4(("VMX_VMCS32_CTRL_ENTRY %#RX32\n", u32Val));
4171 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT, &u32Val); AssertRC(rc);
4172 Log4(("VMX_VMCS32_CTRL_EXIT %#RX32\n", u32Val));
4173 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_CR3_TARGET_COUNT, &u32Val); AssertRC(rc);
4174 Log4(("VMX_VMCS32_CTRL_CR3_TARGET_COUNT %#RX32\n", u32Val));
4175 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32Val); AssertRC(rc);
4176 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", u32Val));
4177 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, &u32Val); AssertRC(rc);
4178 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", u32Val));
4179 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, &u32Val); AssertRC(rc);
4180 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %u\n", u32Val));
4181 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_TPR_THRESHOLD, &u32Val); AssertRC(rc);
4182 Log4(("VMX_VMCS32_CTRL_TPR_THRESHOLD %u\n", u32Val));
4183 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT, &u32Val); AssertRC(rc);
4184 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT %u (guest MSRs)\n", u32Val));
4185 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4186 Log4(("VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT %u (host MSRs)\n", u32Val));
4187 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT, &u32Val); AssertRC(rc);
4188 Log4(("VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT %u (guest MSRs)\n", u32Val));
4189 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, &u32Val); AssertRC(rc);
4190 Log4(("VMX_VMCS32_CTRL_EXCEPTION_BITMAP %#RX32\n", u32Val));
4191 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK, &u32Val); AssertRC(rc);
4192 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK %#RX32\n", u32Val));
4193 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH, &u32Val); AssertRC(rc);
4194 Log4(("VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH %#RX32\n", u32Val));
4195 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
4196 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
4197 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
4198 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
4199 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
4200 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
4201 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
4202 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
4203 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
4204 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
4205
4206 /* Guest bits. */
4207 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val); AssertRC(rc);
4208 Log4(("Old Guest Rip %#RX64 New %#RX64\n", pCtx->rip, u64Val));
4209 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val); AssertRC(rc);
4210 Log4(("Old Guest Rsp %#RX64 New %#RX64\n", pCtx->rsp, u64Val));
4211 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Val); AssertRC(rc);
4212 Log4(("Old Guest Rflags %#RX32 New %#RX32\n", pCtx->eflags.u32, u32Val));
4213 rc = VMXReadVmcs32(VMX_VMCS16_GUEST_FIELD_VPID, &u32Val); AssertRC(rc);
4214 Log4(("VMX_VMCS16_GUEST_FIELD_VPID %u\n", u32Val));
4215
4216 /* Host bits. */
4217 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR0, &uHCReg); AssertRC(rc);
4218 Log4(("Host CR0 %#RHr\n", uHCReg));
4219 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR3, &uHCReg); AssertRC(rc);
4220 Log4(("Host CR3 %#RHr\n", uHCReg));
4221 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_CR4, &uHCReg); AssertRC(rc);
4222 Log4(("Host CR4 %#RHr\n", uHCReg));
4223
4224 RTGDTR HostGdtr;
4225 PCX86DESCHC pDesc;
4226 ASMGetGDTR(&HostGdtr);
4227 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_CS, &u32Val); AssertRC(rc);
4228 Log4(("Host CS %#08x\n", u32Val));
4229 if (u32Val < HostGdtr.cbGdt)
4230 {
4231 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4232 HMR0DumpDescriptor(pDesc, u32Val, "CS: ");
4233 }
4234
4235 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_DS, &u32Val); AssertRC(rc);
4236 Log4(("Host DS %#08x\n", u32Val));
4237 if (u32Val < HostGdtr.cbGdt)
4238 {
4239 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4240 HMR0DumpDescriptor(pDesc, u32Val, "DS: ");
4241 }
4242
4243 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_ES, &u32Val); AssertRC(rc);
4244 Log4(("Host ES %#08x\n", u32Val));
4245 if (u32Val < HostGdtr.cbGdt)
4246 {
4247 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4248 HMR0DumpDescriptor(pDesc, u32Val, "ES: ");
4249 }
4250
4251 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_FS, &u32Val); AssertRC(rc);
4252 Log4(("Host FS %#08x\n", u32Val));
4253 if (u32Val < HostGdtr.cbGdt)
4254 {
4255 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4256 HMR0DumpDescriptor(pDesc, u32Val, "FS: ");
4257 }
4258
4259 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_GS, &u32Val); AssertRC(rc);
4260 Log4(("Host GS %#08x\n", u32Val));
4261 if (u32Val < HostGdtr.cbGdt)
4262 {
4263 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4264 HMR0DumpDescriptor(pDesc, u32Val, "GS: ");
4265 }
4266
4267 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_SS, &u32Val); AssertRC(rc);
4268 Log4(("Host SS %#08x\n", u32Val));
4269 if (u32Val < HostGdtr.cbGdt)
4270 {
4271 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4272 HMR0DumpDescriptor(pDesc, u32Val, "SS: ");
4273 }
4274
4275 rc = VMXReadVmcs32(VMX_VMCS16_HOST_FIELD_TR, &u32Val); AssertRC(rc);
4276 Log4(("Host TR %#08x\n", u32Val));
4277 if (u32Val < HostGdtr.cbGdt)
4278 {
4279 pDesc = (PCX86DESCHC)(HostGdtr.pGdt + (u32Val & X86_SEL_MASK));
4280 HMR0DumpDescriptor(pDesc, u32Val, "TR: ");
4281 }
4282
4283 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_TR_BASE, &uHCReg); AssertRC(rc);
4284 Log4(("Host TR Base %#RHv\n", uHCReg));
4285 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_GDTR_BASE, &uHCReg); AssertRC(rc);
4286 Log4(("Host GDTR Base %#RHv\n", uHCReg));
4287 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_IDTR_BASE, &uHCReg); AssertRC(rc);
4288 Log4(("Host IDTR Base %#RHv\n", uHCReg));
4289 rc = VMXReadVmcs32(VMX_VMCS32_HOST_SYSENTER_CS, &u32Val); AssertRC(rc);
4290 Log4(("Host SYSENTER CS %#08x\n", u32Val));
4291 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_EIP, &uHCReg); AssertRC(rc);
4292 Log4(("Host SYSENTER EIP %#RHv\n", uHCReg));
4293 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_SYSENTER_ESP, &uHCReg); AssertRC(rc);
4294 Log4(("Host SYSENTER ESP %#RHv\n", uHCReg));
4295 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RSP, &uHCReg); AssertRC(rc);
4296 Log4(("Host RSP %#RHv\n", uHCReg));
4297 rc = VMXReadVmcsHstN(VMX_VMCS_HOST_RIP, &uHCReg); AssertRC(rc);
4298 Log4(("Host RIP %#RHv\n", uHCReg));
4299# if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4300 if (HMVMX_IS_64BIT_HOST_MODE())
4301 {
4302 Log4(("MSR_K6_EFER = %#RX64\n", ASMRdMsr(MSR_K6_EFER)));
4303 Log4(("MSR_K6_STAR = %#RX64\n", ASMRdMsr(MSR_K6_STAR)));
4304 Log4(("MSR_K8_LSTAR = %#RX64\n", ASMRdMsr(MSR_K8_LSTAR)));
4305 Log4(("MSR_K8_CSTAR = %#RX64\n", ASMRdMsr(MSR_K8_CSTAR)));
4306 Log4(("MSR_K8_SF_MASK = %#RX64\n", ASMRdMsr(MSR_K8_SF_MASK)));
4307 Log4(("MSR_K8_KERNEL_GS_BASE = %#RX64\n", ASMRdMsr(MSR_K8_KERNEL_GS_BASE)));
4308 }
4309# endif
4310#endif /* VBOX_STRICT */
4311 break;
4312 }
4313
4314 default:
4315 /* Impossible */
4316 AssertMsgFailed(("hmR0VmxReportWorldSwitchError %Rrc (%#x)\n", rcVMRun, rcVMRun));
4317 break;
4318 }
4319 NOREF(pVM);
4320}
4321
4322
4323#if HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
4324#ifndef VMX_USE_CACHED_VMCS_ACCESSES
4325# error "VMX_USE_CACHED_VMCS_ACCESSES not defined when it should be!"
4326#endif
4327#ifdef VBOX_STRICT
4328static bool hmR0VmxIsValidWriteField(uint32_t idxField)
4329{
4330 switch (idxField)
4331 {
4332 case VMX_VMCS_GUEST_RIP:
4333 case VMX_VMCS_GUEST_RSP:
4334 case VMX_VMCS_GUEST_SYSENTER_EIP:
4335 case VMX_VMCS_GUEST_SYSENTER_ESP:
4336 case VMX_VMCS_GUEST_GDTR_BASE:
4337 case VMX_VMCS_GUEST_IDTR_BASE:
4338 case VMX_VMCS_GUEST_CS_BASE:
4339 case VMX_VMCS_GUEST_DS_BASE:
4340 case VMX_VMCS_GUEST_ES_BASE:
4341 case VMX_VMCS_GUEST_FS_BASE:
4342 case VMX_VMCS_GUEST_GS_BASE:
4343 case VMX_VMCS_GUEST_SS_BASE:
4344 case VMX_VMCS_GUEST_LDTR_BASE:
4345 case VMX_VMCS_GUEST_TR_BASE:
4346 case VMX_VMCS_GUEST_CR3:
4347 return true;
4348 }
4349 return false;
4350}
4351
4352static bool hmR0VmxIsValidReadField(uint32_t idxField)
4353{
4354 switch (idxField)
4355 {
4356 /* Read-only fields. */
4357 case VMX_VMCS_RO_EXIT_QUALIFICATION:
4358 return true;
4359 }
4360 /* Remaining readable fields should also be writable. */
4361 return hmR0VmxIsValidWriteField(idxField);
4362}
4363#endif /* VBOX_STRICT */
4364
4365
4366/**
4367 * Executes the specified handler in 64-bit mode.
4368 *
4369 * @returns VBox status code.
4370 * @param pVM Pointer to the VM.
4371 * @param pVCpu Pointer to the VMCPU.
4372 * @param pCtx Pointer to the guest CPU context.
4373 * @param enmOp The operation to perform.
4374 * @param cbParam Number of parameters.
4375 * @param paParam Array of 32-bit parameters.
4376 */
4377VMMR0DECL(int) VMXR0Execute64BitsHandler(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, HM64ON32OP enmOp, uint32_t cbParam,
4378 uint32_t *paParam)
4379{
4380 int rc, rc2;
4381 PHMGLOBALCPUINFO pCpu;
4382 RTHCPHYS HCPhysCpuPage;
4383 RTCCUINTREG uOldEflags;
4384
4385 AssertReturn(pVM->hm.s.pfnHost32ToGuest64R0, VERR_HM_NO_32_TO_64_SWITCHER);
4386 Assert(enmOp > HM64ON32OP_INVALID && enmOp < HM64ON32OP_END);
4387 Assert(pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Write.aField));
4388 Assert(pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries <= RT_ELEMENTS(pVCpu->hm.s.vmx.VMCSCache.Read.aField));
4389
4390#ifdef VBOX_STRICT
4391 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Write.cValidEntries; i++)
4392 Assert(hmR0VmxIsValidWriteField(pVCpu->hm.s.vmx.VMCSCache.Write.aField[i]));
4393
4394    for (uint32_t i = 0; i < pVCpu->hm.s.vmx.VMCSCache.Read.cValidEntries; i++)
4395 Assert(hmR0VmxIsValidReadField(pVCpu->hm.s.vmx.VMCSCache.Read.aField[i]));
4396#endif
4397
4398 /* Disable interrupts. */
4399 uOldEflags = ASMIntDisableFlags();
4400
4401#ifdef VBOX_WITH_VMMR0_DISABLE_LAPIC_NMI
4402 RTCPUID idHostCpu = RTMpCpuId();
4403 CPUMR0SetLApic(pVCpu, idHostCpu);
4404#endif
4405
4406 pCpu = HMR0GetCurrentCpu();
4407 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4408
4409    /* Clear the VMCS: this marks it inactive, clears implementation-specific data and writes the VMCS data back to memory. */
4410 VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
4411
4412 /* Leave VMX Root Mode. */
4413 VMXDisable();
4414
4415 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4416
4417 CPUMSetHyperESP(pVCpu, VMMGetStackRC(pVCpu));
4418 CPUMSetHyperEIP(pVCpu, enmOp);
4419 for (int i = (int)cbParam - 1; i >= 0; i--)
4420 CPUMPushHyper(pVCpu, paParam[i]);
4421
4422 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatWorldSwitch3264, z);
4423
4424 /* Call the switcher. */
4425 rc = pVM->hm.s.pfnHost32ToGuest64R0(pVM, RT_OFFSETOF(VM, aCpus[pVCpu->idCpu].cpum) - RT_OFFSETOF(VM, cpum));
4426 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatWorldSwitch3264, z);
4427
4428 /** @todo replace with hmR0VmxEnterRootMode() and hmR0VmxLeaveRootMode(). */
4429 /* Make sure the VMX instructions don't cause #UD faults. */
4430 ASMSetCR4(ASMGetCR4() | X86_CR4_VMXE);
4431
4432 /* Re-enter VMX Root Mode */
4433 rc2 = VMXEnable(HCPhysCpuPage);
4434 if (RT_FAILURE(rc2))
4435 {
4436 ASMSetCR4(ASMGetCR4() & ~X86_CR4_VMXE);
4437 ASMSetFlags(uOldEflags);
4438 return rc2;
4439 }
4440
4441 rc2 = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
4442 AssertRC(rc2);
4443 Assert(!(ASMGetFlags() & X86_EFL_IF));
4444 ASMSetFlags(uOldEflags);
4445 return rc;
4446}
4447
4448
4449/**
4450 * Prepares for and executes VMLAUNCH (64 bits guests) for 32-bit hosts
4451 * supporting 64-bit guests.
4452 *
4453 * @returns VBox status code.
4454 * @param fResume Whether to VMLAUNCH or VMRESUME.
4455 * @param pCtx Pointer to the guest-CPU context.
4456 * @param pCache Pointer to the VMCS cache.
4457 * @param pVM Pointer to the VM.
4458 * @param pVCpu Pointer to the VMCPU.
4459 */
4460DECLASM(int) VMXR0SwitcherStartVM64(RTHCUINT fResume, PCPUMCTX pCtx, PVMCSCACHE pCache, PVM pVM, PVMCPU pVCpu)
4461{
4462 uint32_t aParam[6];
4463 PHMGLOBALCPUINFO pCpu = NULL;
4464 RTHCPHYS HCPhysCpuPage = 0;
4465 int rc = VERR_INTERNAL_ERROR_5;
4466
4467 pCpu = HMR0GetCurrentCpu();
4468 HCPhysCpuPage = RTR0MemObjGetPagePhysAddr(pCpu->hMemObj, 0);
4469
4470#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4471 pCache->uPos = 1;
4472 pCache->interPD = PGMGetInterPaeCR3(pVM);
4473 pCache->pSwitcher = (uint64_t)pVM->hm.s.pfnHost32ToGuest64R0;
4474#endif
4475
4476#ifdef VBOX_STRICT
4477 pCache->TestIn.HCPhysCpuPage = 0;
4478 pCache->TestIn.HCPhysVmcs = 0;
4479 pCache->TestIn.pCache = 0;
4480 pCache->TestOut.HCPhysVmcs = 0;
4481 pCache->TestOut.pCache = 0;
4482 pCache->TestOut.pCtx = 0;
4483 pCache->TestOut.eflags = 0;
4484#endif
4485
4486 aParam[0] = (uint32_t)(HCPhysCpuPage); /* Param 1: VMXON physical address - Lo. */
4487 aParam[1] = (uint32_t)(HCPhysCpuPage >> 32); /* Param 1: VMXON physical address - Hi. */
4488 aParam[2] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs); /* Param 2: VMCS physical address - Lo. */
4489 aParam[3] = (uint32_t)(pVCpu->hm.s.vmx.HCPhysVmcs >> 32); /* Param 2: VMCS physical address - Hi. */
4490 aParam[4] = VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache);
4491 aParam[5] = 0;
4492
4493#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4494 pCtx->dr[4] = pVM->hm.s.vmx.pScratchPhys + 16 + 8;
4495 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 1;
4496#endif
4497 rc = VMXR0Execute64BitsHandler(pVM, pVCpu, pCtx, HM64ON32OP_VMXRCStartVM64, 6, &aParam[0]);
4498
4499#ifdef VBOX_WITH_CRASHDUMP_MAGIC
4500 Assert(*(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) == 5);
4501 Assert(pCtx->dr[4] == 10);
4502 *(uint32_t *)(pVM->hm.s.vmx.pScratch + 16 + 8) = 0xff;
4503#endif
4504
4505#ifdef VBOX_STRICT
4506 AssertMsg(pCache->TestIn.HCPhysCpuPage == HCPhysCpuPage, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysCpuPage, HCPhysCpuPage));
4507 AssertMsg(pCache->TestIn.HCPhysVmcs == pVCpu->hm.s.vmx.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4508 pVCpu->hm.s.vmx.HCPhysVmcs));
4509 AssertMsg(pCache->TestIn.HCPhysVmcs == pCache->TestOut.HCPhysVmcs, ("%RHp vs %RHp\n", pCache->TestIn.HCPhysVmcs,
4510 pCache->TestOut.HCPhysVmcs));
4511 AssertMsg(pCache->TestIn.pCache == pCache->TestOut.pCache, ("%RGv vs %RGv\n", pCache->TestIn.pCache,
4512 pCache->TestOut.pCache));
4513 AssertMsg(pCache->TestIn.pCache == VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache),
4514 ("%RGv vs %RGv\n", pCache->TestIn.pCache, VM_RC_ADDR(pVM, &pVM->aCpus[pVCpu->idCpu].hm.s.vmx.VMCSCache)));
4515 AssertMsg(pCache->TestIn.pCtx == pCache->TestOut.pCtx, ("%RGv vs %RGv\n", pCache->TestIn.pCtx,
4516 pCache->TestOut.pCtx));
4517 Assert(!(pCache->TestOut.eflags & X86_EFL_IF));
4518#endif
4519 return rc;
4520}
4521
4522
4523/**
4524 * Initializes the VMCS read cache. The VMCS cache is used on 32-bit hosts
4525 * running 64-bit guests (except 32-bit Darwin which runs with 64-bit paging in
4526 * 32-bit mode) for 64-bit fields that cannot be accessed in 32-bit mode. Some
4527 * 64-bit fields -can- be accessed (those that have a 32-bit FULL & HIGH part).
4528 *
4529 * @returns VBox status code.
4530 * @param pVM Pointer to the VM.
4531 * @param pVCpu Pointer to the VMCPU.
4532 */
4533static int hmR0VmxInitVmcsReadCache(PVM pVM, PVMCPU pVCpu)
4534{
4535#define VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, idxField) \
4536{ \
4537 Assert(pCache->Read.aField[idxField##_CACHE_IDX] == 0); \
4538 pCache->Read.aField[idxField##_CACHE_IDX] = idxField; \
4539 pCache->Read.aFieldVal[idxField##_CACHE_IDX] = 0; \
4540 ++cReadFields; \
4541}
4542
4543 AssertPtr(pVM);
4544 AssertPtr(pVCpu);
4545 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4546 uint32_t cReadFields = 0;
4547
4548 /*
4549 * Don't remove the #if 0'd fields in this code. They're listed here for consistency
4550 * and serve to indicate exceptions to the rules.
4551 */
4552
4553 /* Guest-natural selector base fields. */
4554#if 0
4555 /* These are 32-bit in practice. See Intel spec. 2.5 "Control Registers". */
4556 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR0);
4557 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR4);
4558#endif
4559 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_ES_BASE);
4560 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CS_BASE);
4561 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SS_BASE);
4562 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_DS_BASE);
4563 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_FS_BASE);
4564 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GS_BASE);
4565 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_LDTR_BASE);
4566 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_TR_BASE);
4567 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_GDTR_BASE);
4568 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_IDTR_BASE);
4569 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RSP);
4570 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP);
4571#if 0
4572 /* Unused natural width guest-state fields. */
4573 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS);
4574 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3); /* Handled in Nested Paging case */
4575#endif
4576 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_ESP);
4577 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_SYSENTER_EIP);
4578
4579 /* 64-bit guest-state fields; unused as we use two 32-bit VMREADs for these 64-bit fields (using "FULL" and "HIGH" fields). */
4580#if 0
4581 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL);
4582 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_DEBUGCTL_FULL);
4583 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PAT_FULL);
4584 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_EFER_FULL);
4585 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL);
4586 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE0_FULL);
4587 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE1_FULL);
4588 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE2_FULL);
4589 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS64_GUEST_PDPTE3_FULL);
4590#endif
4591
4592 /* Natural width guest-state fields. */
4593 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_QUALIFICATION);
4594#if 0
4595 /* Currently unused field. */
4596 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR);
4597#endif
4598
4599 if (pVM->hm.s.fNestedPaging)
4600 {
4601 VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_CR3);
4602 AssertMsg(cReadFields == VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields,
4603 VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX));
4604 pCache->Read.cValidEntries = VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX;
4605 }
4606 else
4607 {
4608 AssertMsg(cReadFields == VMX_VMCS_MAX_CACHE_IDX, ("cReadFields=%u expected %u\n", cReadFields, VMX_VMCS_MAX_CACHE_IDX));
4609 pCache->Read.cValidEntries = VMX_VMCS_MAX_CACHE_IDX;
4610 }
4611
4612#undef VMXLOCAL_INIT_READ_CACHE_FIELD
4613 return VINF_SUCCESS;
4614}
4615
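/*
 * Illustrative note: VMXLOCAL_INIT_READ_CACHE_FIELD(pCache, VMX_VMCS_GUEST_RIP)
 * above expands to storing the field encoding in
 * pCache->Read.aField[VMX_VMCS_GUEST_RIP_CACHE_IDX] and zeroing the matching
 * aFieldVal slot, so the read cache can later be filled with batched VMREADs
 * in index order (see VMXReadCachedVmcsStore() further below).
 */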
4616
4617/**
4618 * Writes a field into the VMCS. This can either directly invoke a VMWRITE or
4619 * queue up the VMWRITE by using the VMCS write cache (on 32-bit hosts, except
4620 * Darwin, running 64-bit guests).
4621 *
4622 * @returns VBox status code.
4623 * @param pVCpu Pointer to the VMCPU.
4624 * @param idxField The VMCS field encoding.
4625 * @param u64Val 16, 32 or 64 bits value.
4626 */
4627VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4628{
4629 int rc;
4630 switch (idxField)
4631 {
4632 /*
4633 * These fields consist of a "FULL" and a "HIGH" part which can be written to individually.
4634 */
4635 /* 64-bit Control fields. */
4636 case VMX_VMCS64_CTRL_IO_BITMAP_A_FULL:
4637 case VMX_VMCS64_CTRL_IO_BITMAP_B_FULL:
4638 case VMX_VMCS64_CTRL_MSR_BITMAP_FULL:
4639 case VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL:
4640 case VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL:
4641 case VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL:
4642 case VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL:
4643 case VMX_VMCS64_CTRL_TSC_OFFSET_FULL:
4644 case VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL:
4645 case VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL:
4646 case VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL:
4647 case VMX_VMCS64_CTRL_EPTP_FULL:
4648 case VMX_VMCS64_CTRL_EPTP_LIST_FULL:
4649 /* 64-bit Guest-state fields. */
4650 case VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL:
4651 case VMX_VMCS64_GUEST_DEBUGCTL_FULL:
4652 case VMX_VMCS64_GUEST_PAT_FULL:
4653 case VMX_VMCS64_GUEST_EFER_FULL:
4654 case VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL:
4655 case VMX_VMCS64_GUEST_PDPTE0_FULL:
4656 case VMX_VMCS64_GUEST_PDPTE1_FULL:
4657 case VMX_VMCS64_GUEST_PDPTE2_FULL:
4658 case VMX_VMCS64_GUEST_PDPTE3_FULL:
4659 /* 64-bit Host-state fields. */
4660 case VMX_VMCS64_HOST_FIELD_PAT_FULL:
4661 case VMX_VMCS64_HOST_FIELD_EFER_FULL:
4662 case VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL:
4663 {
4664 rc = VMXWriteVmcs32(idxField, u64Val);
4665 rc |= VMXWriteVmcs32(idxField + 1, (uint32_t)(u64Val >> 32));
4666 break;
4667 }
4668
4669 /*
4670 * These fields do not have high and low parts. Queue up the VMWRITE by using the VMCS write-cache (for 64-bit
4671 * values). When we switch the host to 64-bit mode for running 64-bit guests, these VMWRITEs get executed then.
4672 */
4673 /* Natural-width Guest-state fields. */
4674 case VMX_VMCS_GUEST_CR3:
4675 case VMX_VMCS_GUEST_ES_BASE:
4676 case VMX_VMCS_GUEST_CS_BASE:
4677 case VMX_VMCS_GUEST_SS_BASE:
4678 case VMX_VMCS_GUEST_DS_BASE:
4679 case VMX_VMCS_GUEST_FS_BASE:
4680 case VMX_VMCS_GUEST_GS_BASE:
4681 case VMX_VMCS_GUEST_LDTR_BASE:
4682 case VMX_VMCS_GUEST_TR_BASE:
4683 case VMX_VMCS_GUEST_GDTR_BASE:
4684 case VMX_VMCS_GUEST_IDTR_BASE:
4685 case VMX_VMCS_GUEST_RSP:
4686 case VMX_VMCS_GUEST_RIP:
4687 case VMX_VMCS_GUEST_SYSENTER_ESP:
4688 case VMX_VMCS_GUEST_SYSENTER_EIP:
4689 {
4690 if (!(u64Val >> 32))
4691 {
4692 /* If this field is 64-bit, VT-x will zero out the top bits. */
4693 rc = VMXWriteVmcs32(idxField, (uint32_t)u64Val);
4694 }
4695 else
4696 {
4697 /* Assert that only the 32->64 switcher case should ever come here. */
4698 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests);
4699 rc = VMXWriteCachedVmcsEx(pVCpu, idxField, u64Val);
4700 }
4701 break;
4702 }
4703
4704 default:
4705 {
4706 AssertMsgFailed(("VMXWriteVmcs64Ex: Invalid field %#RX32 (pVCpu=%p u64Val=%#RX64)\n", idxField, pVCpu, u64Val));
4707 rc = VERR_INVALID_PARAMETER;
4708 break;
4709 }
4710 }
4711 AssertRCReturn(rc, rc);
4712 return rc;
4713}
4714
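/*
 * Illustrative sketch (not built): a caller writing a 64-bit field that has
 * FULL and HIGH parts, such as the TSC offset, just passes the FULL encoding;
 * VMXWriteVmcs64Ex() performs the split into the two 32-bit VMWRITEs shown
 * above (idxField for the low dword, idxField + 1 for the high dword).
 */
#if 0 /* example only */
static int exampleWriteTscOffset(PVMCPU pVCpu, uint64_t u64TscOffset)
{
    /* Ends up as VMXWriteVmcs32(FULL, lo32) followed by VMXWriteVmcs32(FULL + 1, hi32). */
    return VMXWriteVmcs64Ex(pVCpu, VMX_VMCS64_CTRL_TSC_OFFSET_FULL, u64TscOffset);
}
#endif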
4715
4716/**
4717 * Queues up a VMWRITE by using the VMCS write cache. This is only used on
4718 * 32-bit hosts (except Darwin) for 64-bit guests.
4719 *
4720 * @param pVCpu Pointer to the VMCPU.
4721 * @param idxField The VMCS field encoding.
4722 * @param u64Val 16, 32 or 64 bits value.
4723 */
4724VMMR0DECL(int) VMXWriteCachedVmcsEx(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val)
4725{
4726 AssertPtr(pVCpu);
4727 PVMCSCACHE pCache = &pVCpu->hm.s.vmx.VMCSCache;
4728
4729 AssertMsgReturn(pCache->Write.cValidEntries < VMCSCACHE_MAX_ENTRY - 1,
4730 ("entries=%u\n", pCache->Write.cValidEntries), VERR_ACCESS_DENIED);
4731
4732 /* Make sure there are no duplicates. */
4733 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
4734 {
4735 if (pCache->Write.aField[i] == idxField)
4736 {
4737 pCache->Write.aFieldVal[i] = u64Val;
4738 return VINF_SUCCESS;
4739 }
4740 }
4741
4742 pCache->Write.aField[pCache->Write.cValidEntries] = idxField;
4743 pCache->Write.aFieldVal[pCache->Write.cValidEntries] = u64Val;
4744 pCache->Write.cValidEntries++;
4745 return VINF_SUCCESS;
4746}
4747
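/*
 * Illustrative sketch (not built): queuing the same field twice only updates
 * the cached value in place; cValidEntries grows only for fields not already
 * queued, mirroring the duplicate check in VMXWriteCachedVmcsEx() above.
 */
#if 0 /* example only */
static void exampleWriteCacheDedup(PVMCPU pVCpu)
{
    VMXWriteCachedVmcsEx(pVCpu, VMX_VMCS_GUEST_RIP, UINT64_C(0x00001000)); /* new cache entry */
    VMXWriteCachedVmcsEx(pVCpu, VMX_VMCS_GUEST_RIP, UINT64_C(0x00002000)); /* overwrites the value, no new entry */
}
#endif
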
4748/* Enable later when the assembly code uses these as callbacks. */
4749#if 0
4750/**
4751 * Loads the VMCS write-cache into the CPU (by executing VMWRITEs).
4752 *
4753 * @param pVCpu Pointer to the VMCPU.
4754 * @param pCache Pointer to the VMCS cache.
4755 *
4756 * @remarks No-long-jump zone!!!
4757 */
4758VMMR0DECL(void) VMXWriteCachedVmcsLoad(PVMCPU pVCpu, PVMCSCACHE pCache)
4759{
4760 AssertPtr(pCache);
4761 for (uint32_t i = 0; i < pCache->Write.cValidEntries; i++)
4762 {
4763 int rc = VMXWriteVmcs64(pCache->Write.aField[i], pCache->Write.aFieldVal[i]);
4764 AssertRC(rc);
4765 }
4766 pCache->Write.cValidEntries = 0;
4767}
4768
4769
4770/**
4771 * Stores the VMCS read-cache from the CPU (by executing VMREADs).
4772 *
4773 * @param pVCpu Pointer to the VMCPU.
4774 * @param pCache Pointer to the VMCS cache.
4775 *
4776 * @remarks No-long-jump zone!!!
4777 */
4778VMMR0DECL(void) VMXReadCachedVmcsStore(PVMCPU pVCpu, PVMCSCACHE pCache)
4779{
4780 AssertPtr(pCache);
4781 for (uint32_t i = 0; i < pCache->Read.cValidEntries; i++)
4782 {
4783 int rc = VMXReadVmcs64(pCache->Read.aField[i], &pCache->Read.aFieldVal[i]);
4784 AssertRC(rc);
4785 }
4786}
4787#endif
4788#endif /* HC_ARCH_BITS == 32 && defined(VBOX_ENABLE_64_BITS_GUESTS) && !defined(VBOX_WITH_HYBRID_32BIT_KERNEL) */
4789
4790
4791/**
4792 * Sets up the usage of TSC-offsetting and updates the VMCS. If offsetting is
4793 * not possible, causes VM-exits on RDTSC(P)s. Also sets up the VMX preemption
4794 * timer.
4795 *
4796 * @returns VBox status code.
4797 * @param pVCpu Pointer to the VMCPU.
4798 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4799 * out-of-sync. Make sure to update the required fields
4800 * before using them.
4801 * @remarks No-long-jump zone!!!
4802 */
4803static void hmR0VmxUpdateTscOffsettingAndPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4804{
4805 int rc = VERR_INTERNAL_ERROR_5;
4806 bool fOffsettedTsc = false;
4807 PVM pVM = pVCpu->CTX_SUFF(pVM);
4808 if (pVM->hm.s.vmx.fUsePreemptTimer)
4809 {
4810 uint64_t cTicksToDeadline = TMCpuTickGetDeadlineAndTscOffset(pVCpu, &fOffsettedTsc, &pVCpu->hm.s.vmx.u64TSCOffset);
4811
4812 /* Make sure the returned values have sane upper and lower boundaries. */
4813 uint64_t u64CpuHz = SUPGetCpuHzFromGIP(g_pSUPGlobalInfoPage);
4814 cTicksToDeadline = RT_MIN(cTicksToDeadline, u64CpuHz / 64); /* 1/64th of a second */
4815 cTicksToDeadline = RT_MAX(cTicksToDeadline, u64CpuHz / 2048); /* 1/2048th of a second */
4816 cTicksToDeadline >>= pVM->hm.s.vmx.cPreemptTimerShift;
4817
4818 uint32_t cPreemptionTickCount = (uint32_t)RT_MIN(cTicksToDeadline, UINT32_MAX - 16);
4819 rc = VMXWriteVmcs32(VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE, cPreemptionTickCount); AssertRC(rc);
4820 }
4821 else
4822 fOffsettedTsc = TMCpuTickCanUseRealTSC(pVCpu, &pVCpu->hm.s.vmx.u64TSCOffset);
4823
4824 if (fOffsettedTsc)
4825 {
4826 uint64_t u64CurTSC = ASMReadTSC();
4827 if (u64CurTSC + pVCpu->hm.s.vmx.u64TSCOffset >= TMCpuTickGetLastSeen(pVCpu))
4828 {
4829 /* Note: VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT takes precedence over TSC_OFFSET, applies to RDTSCP too. */
4830 rc = VMXWriteVmcs64(VMX_VMCS64_CTRL_TSC_OFFSET_FULL, pVCpu->hm.s.vmx.u64TSCOffset); AssertRC(rc);
4831
4832 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4833 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4834 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscOffset);
4835 }
4836 else
4837 {
4838 /* VM-exit on RDTSC(P) as we would otherwise pass decreasing TSC values to the guest. */
4839 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4840 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4841 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscInterceptOverFlow);
4842 }
4843 }
4844 else
4845 {
4846 /* We can't use TSC-offsetting (non-fixed TSC, warp drive active etc.), VM-exit on RDTSC(P). */
4847 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT;
4848 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls); AssertRC(rc);
4849 STAM_COUNTER_INC(&pVCpu->hm.s.StatTscIntercept);
4850 }
4851}
4852
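/*
 * Worked example for the preemption-timer clamping above (illustrative
 * numbers): with u64CpuHz = 2 GHz the deadline is clamped to
 * [u64CpuHz / 2048, u64CpuHz / 64] = [976562, 31250000] ticks, i.e. roughly
 * 0.49 ms to 15.6 ms, then shifted right by cPreemptTimerShift and truncated
 * to 32 bits before the VMWRITE to VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE.
 */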
4853
4854/**
4855 * Determines if an exception is a contributory exception. Contributory
4856 * exceptions are ones which can cause double-faults. Page-fault is
4857 * intentionally not included here as it's a conditional contributory exception.
4858 *
4859 * @returns true if the exception is contributory, false otherwise.
4860 * @param uVector The exception vector.
4861 */
4862DECLINLINE(bool) hmR0VmxIsContributoryXcpt(const uint32_t uVector)
4863{
4864 switch (uVector)
4865 {
4866 case X86_XCPT_GP:
4867 case X86_XCPT_SS:
4868 case X86_XCPT_NP:
4869 case X86_XCPT_TS:
4870 case X86_XCPT_DE:
4871 return true;
4872 default:
4873 break;
4874 }
4875 return false;
4876}
4877
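/*
 * Illustrative summary of how the reflection logic below combines the
 * original (IDT-vectoring) event with the new exception; see
 * hmR0VmxCheckExitDueToEventDelivery():
 *      #PF while delivering #PF                          -> flagged as a vectoring #PF
 *      contributory while delivering contributory or #PF -> #DF queued for the guest
 *      anything while delivering #DF                     -> triple fault (VINF_EM_RESET)
 *      everything else                                   -> reflect the original event
 */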
4878
4879/**
4880 * Sets an event as a pending event to be injected into the guest.
4881 *
4882 * @param pVCpu Pointer to the VMCPU.
4883 * @param u32IntrInfo The VM-entry interruption-information field.
4884 * @param cbInstr The VM-entry instruction length in bytes (for software
4885 * interrupts, exceptions and privileged software
4886 * exceptions).
4887 * @param u32ErrCode The VM-entry exception error code.
4888 * @param GCPtrFaultAddress The fault-address (CR2) in case it's a
4889 * page-fault.
4890 *
4891 * @remarks Statistics counter assumes this is a guest event being injected or
4892 * re-injected into the guest, i.e. 'StatInjectPendingReflect' is
4893 * always incremented.
4894 */
4895DECLINLINE(void) hmR0VmxSetPendingEvent(PVMCPU pVCpu, uint32_t u32IntrInfo, uint32_t cbInstr, uint32_t u32ErrCode,
4896 RTGCUINTPTR GCPtrFaultAddress)
4897{
4898 Assert(!pVCpu->hm.s.Event.fPending);
4899 pVCpu->hm.s.Event.fPending = true;
4900 pVCpu->hm.s.Event.u64IntrInfo = u32IntrInfo;
4901 pVCpu->hm.s.Event.u32ErrCode = u32ErrCode;
4902 pVCpu->hm.s.Event.cbInstr = cbInstr;
4903 pVCpu->hm.s.Event.GCPtrFaultAddress = GCPtrFaultAddress;
4904
4905 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectPendingReflect);
4906}
4907
4908
4909/**
4910 * Sets a double-fault (#DF) exception as pending-for-injection into the VM.
4911 *
4912 * @param pVCpu Pointer to the VMCPU.
4913 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4914 * out-of-sync. Make sure to update the required fields
4915 * before using them.
4916 */
4917DECLINLINE(void) hmR0VmxSetPendingXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
4918{
4919 uint32_t u32IntrInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
4920 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
4921 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
4922 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
4923}
4924
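/*
 * Illustrative sketch (not built): composing the VM-entry interruption info
 * for another hardware exception with an error code, say #GP, follows the
 * same bit layout as the #DF case above.
 */
#if 0 /* example only */
static void exampleSetPendingXcptGP(PVMCPU pVCpu, uint32_t uErrCode)
{
    uint32_t u32IntrInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
    u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
    u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
    hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, uErrCode, 0 /* GCPtrFaultAddress */);
}
#endif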
4925
4926/**
4927 * Handles a condition that occurred while delivering an event through the guest
4928 * IDT.
4929 *
4930 * @returns VBox status code (informational status codes included).
4931 * @retval VINF_SUCCESS if we should continue handling the VM-exit.
4932 * @retval VINF_HM_DOUBLE_FAULT if a #DF condition was detected and we ought to
4933 *         continue execution of the guest, which will deliver the #DF.
4934 * @retval VINF_EM_RESET if we detected a triple-fault condition.
4935 *
4936 * @param pVCpu Pointer to the VMCPU.
4937 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
4938 * out-of-sync. Make sure to update the required fields
4939 * before using them.
4940 * @param pVmxTransient Pointer to the VMX transient structure.
4941 *
4942 * @remarks No-long-jump zone!!!
4943 */
4944static int hmR0VmxCheckExitDueToEventDelivery(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
4945{
4946 int rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
4947 AssertRC(rc);
4948 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
4949 {
4950 rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
4951 AssertRCReturn(rc, rc);
4952
4953 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
4954 uint32_t uExitVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(pVmxTransient->uExitIntrInfo);
4955 uint32_t uIdtVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
4956
4957 typedef enum
4958 {
4959 VMXREFLECTXCPT_XCPT, /* Reflect the exception to the guest or for further evaluation by VMM. */
4960 VMXREFLECTXCPT_DF, /* Reflect the exception as a double-fault to the guest. */
4961 VMXREFLECTXCPT_TF, /* Indicate a triple faulted state to the VMM. */
4962 VMXREFLECTXCPT_NONE /* Nothing to reflect. */
4963 } VMXREFLECTXCPT;
4964
4965 /* See Intel spec. 30.7.1.1 "Reflecting Exceptions to Guest Software". */
4966 VMXREFLECTXCPT enmReflect = VMXREFLECTXCPT_NONE;
4967 if (VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntrInfo))
4968 {
4969 if (uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT)
4970 {
4971 enmReflect = VMXREFLECTXCPT_XCPT;
4972#ifdef VBOX_STRICT
4973 if ( hmR0VmxIsContributoryXcpt(uIdtVector)
4974 && uExitVector == X86_XCPT_PF)
4975 {
4976 Log4(("IDT: vcpu[%RU32] Contributory #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
4977 }
4978#endif
4979 if ( uExitVector == X86_XCPT_PF
4980 && uIdtVector == X86_XCPT_PF)
4981 {
4982 pVmxTransient->fVectoringPF = true;
4983 Log4(("IDT: vcpu[%RU32] Vectoring #PF uCR2=%#RX64\n", pVCpu->idCpu, pMixedCtx->cr2));
4984 }
4985 else if ( (pVCpu->hm.s.vmx.u32XcptBitmap & HMVMX_CONTRIBUTORY_XCPT_MASK)
4986 && hmR0VmxIsContributoryXcpt(uExitVector)
4987 && ( hmR0VmxIsContributoryXcpt(uIdtVector)
4988 || uIdtVector == X86_XCPT_PF))
4989 {
4990 enmReflect = VMXREFLECTXCPT_DF;
4991 }
4992 else if (uIdtVector == X86_XCPT_DF)
4993 enmReflect = VMXREFLECTXCPT_TF;
4994 }
4995 else if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
4996 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
4997 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
4998 {
4999 /*
5000             * Ignore software interrupts (INT n), software exceptions (#BP, #OF) and privileged software exceptions
5001             * (whatever they are) as they reoccur when restarting the instruction.
5002 */
5003 enmReflect = VMXREFLECTXCPT_XCPT;
5004 }
5005 }
5006 else
5007 {
5008 /*
5009 * If event delivery caused an EPT violation/misconfig or APIC access VM-exit, then the VM-exit
5010 * interruption-information will not be valid and we end up here. In such cases, it is sufficient to reflect the
5011 * original exception to the guest after handling the VM-exit.
5012 */
5013 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
5014 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_EXT_INT
5015 || uIntType == VMX_IDT_VECTORING_INFO_TYPE_NMI)
5016 {
5017 enmReflect = VMXREFLECTXCPT_XCPT;
5018 }
5019 }
5020
5021 switch (enmReflect)
5022 {
5023 case VMXREFLECTXCPT_XCPT:
5024 {
5025 Assert( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
5026 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
5027 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT);
5028
5029 uint32_t u32ErrCode = 0;
5030 if (VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo))
5031 {
5032 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
5033 AssertRCReturn(rc, rc);
5034 u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
5035 }
5036
5037 /* If uExitVector is #PF, CR2 value will be updated from the VMCS if it's a guest #PF. See hmR0VmxExitXcptPF(). */
5038 hmR0VmxSetPendingEvent(pVCpu, VMX_ENTRY_INTR_INFO_FROM_EXIT_IDT_INFO(pVmxTransient->uIdtVectoringInfo),
5039 0 /* cbInstr */, u32ErrCode, pMixedCtx->cr2);
5040 rc = VINF_SUCCESS;
5041 Log4(("IDT: vcpu[%RU32] Pending vectoring event %#RX64 Err=%#RX32\n", pVCpu->idCpu,
5042 pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.u32ErrCode));
5043
5044 break;
5045 }
5046
5047 case VMXREFLECTXCPT_DF:
5048 {
5049 hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
5050 rc = VINF_HM_DOUBLE_FAULT;
5051 Log4(("IDT: vcpu[%RU32] Pending vectoring #DF %#RX64 uIdtVector=%#x uExitVector=%#x\n", pVCpu->idCpu,
5052 pVCpu->hm.s.Event.u64IntrInfo, uIdtVector, uExitVector));
5053
5054 break;
5055 }
5056
5057 case VMXREFLECTXCPT_TF:
5058 {
5059 rc = VINF_EM_RESET;
5060 Log4(("IDT: vcpu[%RU32] Pending vectoring triple-fault uIdt=%#x uExit=%#x\n", pVCpu->idCpu, uIdtVector,
5061 uExitVector));
5062 break;
5063 }
5064
5065 default:
5066 Assert(rc == VINF_SUCCESS);
5067 break;
5068 }
5069 }
5070 Assert(rc == VINF_SUCCESS || rc == VINF_HM_DOUBLE_FAULT || rc == VINF_EM_RESET);
5071 return rc;
5072}
5073
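/*
 * Worked scenario (illustrative): the guest takes a #GP whose handler's code
 * segment is marked not-present; delivering the #GP raises #NP, causing a
 * VM-exit with IDT-vectoring info = #GP and exit interruption info = #NP.
 * Both vectors are contributory, so (assuming #NP is in the exception bitmap)
 * the function above queues a #DF and returns VINF_HM_DOUBLE_FAULT. Had the
 * original event been a #DF, it would return VINF_EM_RESET instead.
 */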
5074
5075/**
5076 * Saves the guest's CR0 register from the VMCS into the guest-CPU context.
5077 *
5078 * @returns VBox status code.
5079 * @param pVCpu Pointer to the VMCPU.
5080 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5081 * out-of-sync. Make sure to update the required fields
5082 * before using them.
5083 *
5084 * @remarks No-long-jump zone!!!
5085 */
5086static int hmR0VmxSaveGuestCR0(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5087{
5088 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0))
5089 {
5090 uint32_t uVal = 0;
5091 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &uVal);
5092 AssertRCReturn(rc, rc);
5093 uint32_t uShadow = 0;
5094 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uShadow);
5095 AssertRCReturn(rc, rc);
5096
5097 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR0Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR0Mask);
5098 CPUMSetGuestCR0(pVCpu, uVal);
5099 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR0;
5100 }
5101 return VINF_SUCCESS;
5102}
5103
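/*
 * Note on the combine step above: bits owned by the host (set in u32CR0Mask)
 * are taken from the CR0 read shadow (the guest-visible values), the rest
 * from the real guest CR0:
 *      guestCR0 = (shadow & mask) | (real & ~mask)
 * E.g. (illustrative values) mask = 0x80000000 (CR0.PG host-owned),
 * shadow = 0x00000000, real = 0x80000031 yields a guest view of 0x00000031.
 */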
5104
5105/**
5106 * Saves the guest's CR4 register from the VMCS into the guest-CPU context.
5107 *
5108 * @returns VBox status code.
5109 * @param pVCpu Pointer to the VMCPU.
5110 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5111 * out-of-sync. Make sure to update the required fields
5112 * before using them.
5113 *
5114 * @remarks No-long-jump zone!!!
5115 */
5116static int hmR0VmxSaveGuestCR4(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5117{
5118 int rc = VINF_SUCCESS;
5119 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4))
5120 {
5121 uint32_t uVal = 0;
5122 uint32_t uShadow = 0;
5123 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &uVal);
5124 AssertRCReturn(rc, rc);
5125 rc = VMXReadVmcs32(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uShadow);
5126 AssertRCReturn(rc, rc);
5127
5128 uVal = (uShadow & pVCpu->hm.s.vmx.u32CR4Mask) | (uVal & ~pVCpu->hm.s.vmx.u32CR4Mask);
5129 CPUMSetGuestCR4(pVCpu, uVal);
5130 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR4;
5131 }
5132 return rc;
5133}
5134
5135
5136/**
5137 * Saves the guest's RIP register from the VMCS into the guest-CPU context.
5138 *
5139 * @returns VBox status code.
5140 * @param pVCpu Pointer to the VMCPU.
5141 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5142 * out-of-sync. Make sure to update the required fields
5143 * before using them.
5144 *
5145 * @remarks No-long-jump zone!!!
5146 */
5147static int hmR0VmxSaveGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5148{
5149 int rc = VINF_SUCCESS;
5150 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP))
5151 {
5152 uint64_t u64Val = 0;
5153 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RIP, &u64Val);
5154 AssertRCReturn(rc, rc);
5155
5156 pMixedCtx->rip = u64Val;
5157 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RIP;
5158 }
5159 return rc;
5160}
5161
5162
5163/**
5164 * Saves the guest's RSP register from the VMCS into the guest-CPU context.
5165 *
5166 * @returns VBox status code.
5167 * @param pVCpu Pointer to the VMCPU.
5168 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5169 * out-of-sync. Make sure to update the required fields
5170 * before using them.
5171 *
5172 * @remarks No-long-jump zone!!!
5173 */
5174static int hmR0VmxSaveGuestRsp(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5175{
5176 int rc = VINF_SUCCESS;
5177 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RSP))
5178 {
5179 uint64_t u64Val = 0;
5180 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_RSP, &u64Val);
5181 AssertRCReturn(rc, rc);
5182
5183 pMixedCtx->rsp = u64Val;
5184 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RSP;
5185 }
5186 return rc;
5187}
5188
5189
5190/**
5191 * Saves the guest's RFLAGS from the VMCS into the guest-CPU context.
5192 *
5193 * @returns VBox status code.
5194 * @param pVCpu Pointer to the VMCPU.
5195 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5196 * out-of-sync. Make sure to update the required fields
5197 * before using them.
5198 *
5199 * @remarks No-long-jump zone!!!
5200 */
5201static int hmR0VmxSaveGuestRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5202{
5203 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS))
5204 {
5205 uint32_t uVal = 0;
5206 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &uVal);
5207 AssertRCReturn(rc, rc);
5208
5209 pMixedCtx->eflags.u32 = uVal;
5210 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active) /* Undo our real-on-v86-mode changes to eflags if necessary. */
5211 {
5212 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
5213 Log4(("Saving real-mode EFLAGS VT-x view=%#RX32\n", pMixedCtx->eflags.u32));
5214
5215 pMixedCtx->eflags.Bits.u1VM = 0;
5216 pMixedCtx->eflags.Bits.u2IOPL = pVCpu->hm.s.vmx.RealMode.Eflags.Bits.u2IOPL;
5217 }
5218
5219 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_RFLAGS;
5220 }
5221 return VINF_SUCCESS;
5222}
5223
5224
5225/**
5226 * Wrapper for saving the guest's RIP, RSP and RFLAGS from the VMCS into the
5227 * guest-CPU context.
5228 */
5229DECLINLINE(int) hmR0VmxSaveGuestRipRspRflags(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5230{
5231 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
5232 rc |= hmR0VmxSaveGuestRsp(pVCpu, pMixedCtx);
5233 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
5234 return rc;
5235}
5236
5237
5238/**
5239 * Saves the guest's interruptibility-state ("interrupt shadow" as AMD calls it)
5240 * from the guest-state area in the VMCS.
5241 *
5242 * @param pVCpu Pointer to the VMCPU.
5243 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5244 * out-of-sync. Make sure to update the required fields
5245 * before using them.
5246 *
5247 * @remarks No-long-jump zone!!!
5248 */
5249static void hmR0VmxSaveGuestIntrState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5250{
5251 uint32_t uIntrState = 0;
5252 int rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
5253 AssertRC(rc);
5254
5255 if (!uIntrState)
5256 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS);
5257 else
5258 {
5259 Assert( uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
5260 || uIntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
5261 rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
5262 AssertRC(rc);
5263 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* for hmR0VmxGetGuestIntrState(). */
5264 AssertRC(rc);
5265
5266 EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
5267 Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
5268 }
5269}
5270
5271
5272/**
5273 * Saves the guest's activity state.
5274 *
5275 * @returns VBox status code.
5276 * @param pVCpu Pointer to the VMCPU.
5277 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5278 * out-of-sync. Make sure to update the required fields
5279 * before using them.
5280 *
5281 * @remarks No-long-jump zone!!!
5282 */
5283static int hmR0VmxSaveGuestActivityState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5284{
5285 /* Nothing to do for now until we make use of different guest-CPU activity state. Just update the flag. */
5286 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_ACTIVITY_STATE;
5287 return VINF_SUCCESS;
5288}
5289
5290
5291/**
5292 * Saves the guest SYSENTER MSRs (SYSENTER_CS, SYSENTER_EIP, SYSENTER_ESP) from
5293 * the current VMCS into the guest-CPU context.
5294 *
5295 * @returns VBox status code.
5296 * @param pVCpu Pointer to the VMCPU.
5297 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5298 * out-of-sync. Make sure to update the required fields
5299 * before using them.
5300 *
5301 * @remarks No-long-jump zone!!!
5302 */
5303static int hmR0VmxSaveGuestSysenterMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5304{
5305 int rc = VINF_SUCCESS;
5306 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR))
5307 {
5308 uint32_t u32Val = 0;
5309 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_SYSENTER_CS, &u32Val); AssertRCReturn(rc, rc);
5310 pMixedCtx->SysEnter.cs = u32Val;
5311 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_CS_MSR;
5312 }
5313
5314 uint64_t u64Val = 0;
5315 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR))
5316 {
5317 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_EIP, &u64Val); AssertRCReturn(rc, rc);
5318 pMixedCtx->SysEnter.eip = u64Val;
5319 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_EIP_MSR;
5320 }
5321 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR))
5322 {
5323 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_SYSENTER_ESP, &u64Val); AssertRCReturn(rc, rc);
5324 pMixedCtx->SysEnter.esp = u64Val;
5325 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SYSENTER_ESP_MSR;
5326 }
5327 return rc;
5328}
5329
5330
5331/**
5332 * Saves the guest FS_BASE MSR from the current VMCS into the guest-CPU
5333 * context.
5334 *
5335 * @returns VBox status code.
5336 * @param pVCpu Pointer to the VMCPU.
5337 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5338 * out-of-sync. Make sure to update the required fields
5339 * before using them.
5340 *
5341 * @remarks No-long-jump zone!!!
5342 */
5343static int hmR0VmxSaveGuestFSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5344{
5345 int rc = VINF_SUCCESS;
5346 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_FS_BASE_MSR))
5347 {
5348 uint64_t u64Val = 0;
5349 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_FS_BASE, &u64Val); AssertRCReturn(rc, rc);
5350 pMixedCtx->fs.u64Base = u64Val;
5351 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_FS_BASE_MSR;
5352 }
5353 return rc;
5354}
5355
5356
5357/**
5358 * Saves the guest GS_BASE MSR from the current VMCS into the guest-CPU
5359 * context.
5360 *
5361 * @returns VBox status code.
5362 * @param pVCpu Pointer to the VMCPU.
5363 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5364 * out-of-sync. Make sure to update the required fields
5365 * before using them.
5366 *
5367 * @remarks No-long-jump zone!!!
5368 */
5369static int hmR0VmxSaveGuestGSBaseMsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5370{
5371 int rc = VINF_SUCCESS;
5372 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GS_BASE_MSR))
5373 {
5374 uint64_t u64Val = 0;
5375 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GS_BASE, &u64Val); AssertRCReturn(rc, rc);
5376 pMixedCtx->gs.u64Base = u64Val;
5377 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GS_BASE_MSR;
5378 }
5379 return rc;
5380}
5381
5382
5383/**
5384 * Saves the auto load/store'd guest MSRs from the current VMCS into the
5385 * guest-CPU context. Currently these are LSTAR, STAR, SFMASK, KERNEL-GS BASE
5386 * and TSC_AUX.
5387 *
5388 * @returns VBox status code.
5389 * @param pVCpu Pointer to the VMCPU.
5390 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5391 * out-of-sync. Make sure to update the required fields
5392 * before using them.
5393 *
5394 * @remarks No-long-jump zone!!!
5395 */
5396static int hmR0VmxSaveGuestAutoLoadStoreMsrs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5397{
5398 if (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS)
5399 return VINF_SUCCESS;
5400
5401#ifdef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
5402 for (uint32_t i = 0; i < pVCpu->hm.s.vmx.cGuestMsrs; i++)
5403 {
5404 PVMXMSR pMsr = (PVMXMSR)pVCpu->hm.s.vmx.pvGuestMsr;
5405 pMsr += i;
5406 switch (pMsr->u32IndexMSR)
5407 {
5408 case MSR_K8_LSTAR: pMixedCtx->msrLSTAR = pMsr->u64Value; break;
5409 case MSR_K6_STAR: pMixedCtx->msrSTAR = pMsr->u64Value; break;
5410 case MSR_K8_SF_MASK: pMixedCtx->msrSFMASK = pMsr->u64Value; break;
5411 case MSR_K8_TSC_AUX: CPUMSetGuestMsr(pVCpu, MSR_K8_TSC_AUX, pMsr->u64Value); break;
5412 case MSR_K8_KERNEL_GS_BASE: pMixedCtx->msrKERNELGSBASE = pMsr->u64Value; break;
5413 case MSR_K6_EFER: /* EFER can't be changed without causing a VM-exit. */ break;
5414 default:
5415 {
5416 AssertFailed();
5417 return VERR_HM_UNEXPECTED_LD_ST_MSR;
5418 }
5419 }
5420 }
5421#endif
5422
5423 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_AUTO_LOAD_STORE_MSRS;
5424 return VINF_SUCCESS;
5425}
5426
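/*
 * Illustrative note: each entry in the auto-load/store area walked above is a
 * VMXMSR record whose u32IndexMSR identifies the MSR and whose u64Value holds
 * its guest value; e.g. a guest WRMSR to MSR_K8_LSTAR surfaces as an entry
 * with u32IndexMSR == MSR_K8_LSTAR that the loop copies into
 * pMixedCtx->msrLSTAR.
 */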
5427
5428/**
5429 * Saves the guest control registers from the current VMCS into the guest-CPU
5430 * context.
5431 *
5432 * @returns VBox status code.
5433 * @param pVCpu Pointer to the VMCPU.
5434 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5435 * out-of-sync. Make sure to update the required fields
5436 * before using them.
5437 *
5438 * @remarks No-long-jump zone!!!
5439 */
5440static int hmR0VmxSaveGuestControlRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5441{
5442 /* Guest CR0. Guest FPU. */
5443 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5444 AssertRCReturn(rc, rc);
5445
5446 /* Guest CR4. */
5447 rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
5448 AssertRCReturn(rc, rc);
5449
5450    /* Guest CR2 - always updated during the world-switch or in #PF. */
5451 /* Guest CR3. Only changes with Nested Paging. This must be done -after- saving CR0 and CR4 from the guest! */
5452 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR3))
5453 {
5454 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
5455 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR4);
5456
5457 PVM pVM = pVCpu->CTX_SUFF(pVM);
5458 if ( pVM->hm.s.vmx.fUnrestrictedGuest
5459 || ( pVM->hm.s.fNestedPaging
5460 && CPUMIsGuestPagingEnabledEx(pMixedCtx)))
5461 {
5462 uint64_t u64Val = 0;
5463            rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_CR3, &u64Val);  AssertRCReturn(rc, rc);
5464 if (pMixedCtx->cr3 != u64Val)
5465 {
5466 CPUMSetGuestCR3(pVCpu, u64Val);
5467 if (VMMRZCallRing3IsEnabled(pVCpu))
5468 {
5469 PGMUpdateCR3(pVCpu, u64Val);
5470 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5471 }
5472 else
5473 {
5474 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMUpdateCR3().*/
5475 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_CR3);
5476 }
5477 }
5478
5479 /* If the guest is in PAE mode, sync back the PDPE's into the guest state. */
5480 if (CPUMIsGuestInPAEModeEx(pMixedCtx)) /* Reads CR0, CR4 and EFER MSR (EFER is always up-to-date). */
5481 {
5482 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE0_FULL, &pVCpu->hm.s.aPdpes[0].u); AssertRCReturn(rc, rc);
5483 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE1_FULL, &pVCpu->hm.s.aPdpes[1].u); AssertRCReturn(rc, rc);
5484 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE2_FULL, &pVCpu->hm.s.aPdpes[2].u); AssertRCReturn(rc, rc);
5485 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PDPTE3_FULL, &pVCpu->hm.s.aPdpes[3].u); AssertRCReturn(rc, rc);
5486
5487 if (VMMRZCallRing3IsEnabled(pVCpu))
5488 {
5489 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5490 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5491 }
5492 else
5493 {
5494 /* Set the force flag to inform PGM about it when necessary. It is cleared by PGMGstUpdatePaePdpes(). */
5495 VMCPU_FF_SET(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES);
5496 }
5497 }
5498 }
5499
5500 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_CR3;
5501 }
5502
5503 /*
5504 * Consider this scenario: VM-exit -> VMMRZCallRing3Enable() -> do stuff that causes a longjmp -> hmR0VmxCallRing3Callback()
5505 * -> VMMRZCallRing3Disable() -> hmR0VmxSaveGuestState() -> Set VMCPU_FF_HM_UPDATE_CR3 pending -> return from the longjmp
5506 * -> continue with VM-exit handling -> hmR0VmxSaveGuestControlRegs() and here we are.
5507 *
5508 * The longjmp exit path can't check these CR3 force-flags and call code that takes a lock again. We cover for it here.
5509 */
5510 if (VMMRZCallRing3IsEnabled(pVCpu))
5511 {
5512 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
5513 PGMUpdateCR3(pVCpu, CPUMGetGuestCR3(pVCpu));
5514
5515 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
5516 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5517
5518 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5519 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5520 }
5521
5522 return rc;
5523}
5524
5525
5526/**
5527 * Reads a guest segment register from the current VMCS into the guest-CPU
5528 * context.
5529 *
5530 * @returns VBox status code.
5531 * @param pVCpu Pointer to the VMCPU.
5532 * @param idxSel Index of the selector in the VMCS.
5533 * @param idxLimit Index of the segment limit in the VMCS.
5534 * @param idxBase Index of the segment base in the VMCS.
5535 * @param idxAccess Index of the access rights of the segment in the VMCS.
5536 * @param pSelReg Pointer to the segment selector.
5537 *
5538 * @remarks No-long-jump zone!!!
5539 * @remarks Never call this function directly!!! Use the VMXLOCAL_READ_SEG()
5540 * macro as that takes care of whether to read from the VMCS cache or
5541 * not.
5542 */
5543DECLINLINE(int) hmR0VmxReadSegmentReg(PVMCPU pVCpu, uint32_t idxSel, uint32_t idxLimit, uint32_t idxBase, uint32_t idxAccess,
5544 PCPUMSELREG pSelReg)
5545{
5546 uint32_t u32Val = 0;
5547 int rc = VMXReadVmcs32(idxSel, &u32Val);
5548 AssertRCReturn(rc, rc);
5549 pSelReg->Sel = (uint16_t)u32Val;
5550 pSelReg->ValidSel = (uint16_t)u32Val;
5551 pSelReg->fFlags = CPUMSELREG_FLAGS_VALID;
5552
5553 rc = VMXReadVmcs32(idxLimit, &u32Val);
5554 AssertRCReturn(rc, rc);
5555 pSelReg->u32Limit = u32Val;
5556
5557 uint64_t u64Val = 0;
5558 rc = VMXReadVmcsGstNByIdxVal(idxBase, &u64Val);
5559 AssertRCReturn(rc, rc);
5560 pSelReg->u64Base = u64Val;
5561
5562 rc = VMXReadVmcs32(idxAccess, &u32Val);
5563 AssertRCReturn(rc, rc);
5564 pSelReg->Attr.u = u32Val;
5565
5566 /*
5567 * If VT-x marks the segment as unusable, most other bits remain undefined:
5568 * - For CS the L, D and G bits have meaning.
5569 * - For SS the DPL has meaning (it -is- the CPL for Intel and VBox).
5570 * - For the remaining data segments no bits are defined.
5571 *
5572     * The present bit and the unusable bit have been observed to be set at the
5573     * same time (the selector was supposed to be invalid as we started executing
5574     * a V8086 interrupt in ring-0).
5575 *
5576     * What matters for the rest of the VBox code is that the P bit is
5577     * cleared. Some of the other VBox code recognizes the unusable bit, but
5578     * AMD-V certainly doesn't, and REM doesn't really either. So, to be on the
5579 * safe side here, we'll strip off P and other bits we don't care about. If
5580 * any code breaks because Attr.u != 0 when Sel < 4, it should be fixed.
5581 *
5582 * See Intel spec. 27.3.2 "Saving Segment Registers and Descriptor-Table Registers".
5583 */
5584 if (pSelReg->Attr.u & X86DESCATTR_UNUSABLE)
5585 {
5586 Assert(idxSel != VMX_VMCS16_GUEST_FIELD_TR); /* TR is the only selector that can never be unusable. */
5587
5588 /* Masking off: X86DESCATTR_P, X86DESCATTR_LIMIT_HIGH, and X86DESCATTR_AVL. The latter two are really irrelevant. */
5589 pSelReg->Attr.u &= X86DESCATTR_UNUSABLE | X86DESCATTR_L | X86DESCATTR_D | X86DESCATTR_G
5590 | X86DESCATTR_DPL | X86DESCATTR_TYPE | X86DESCATTR_DT;
5591
5592 Log4(("hmR0VmxReadSegmentReg: Unusable idxSel=%#x attr=%#x -> %#x\n", idxSel, u32Val, pSelReg->Attr.u));
5593#ifdef DEBUG_bird
5594 AssertMsg((u32Val & ~X86DESCATTR_P) == pSelReg->Attr.u,
5595 ("%#x: %#x != %#x (sel=%#x base=%#llx limit=%#x)\n",
5596 idxSel, u32Val, pSelReg->Attr.u, pSelReg->Sel, pSelReg->u64Base, pSelReg->u32Limit));
5597#endif
5598 }
5599 return VINF_SUCCESS;
5600}
5601
5602
5603#ifdef VMX_USE_CACHED_VMCS_ACCESSES
5604# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5605 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5606 VMX_VMCS_GUEST_##Sel##_BASE_CACHE_IDX, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5607#else
5608# define VMXLOCAL_READ_SEG(Sel, CtxSel) \
5609 hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_##Sel, VMX_VMCS32_GUEST_##Sel##_LIMIT, \
5610 VMX_VMCS_GUEST_##Sel##_BASE, VMX_VMCS32_GUEST_##Sel##_ACCESS_RIGHTS, &pMixedCtx->CtxSel)
5611#endif
5612
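/*
 * Illustrative expansion (non-cached variant): VMXLOCAL_READ_SEG(CS, cs)
 * becomes a call of the form
 *   hmR0VmxReadSegmentReg(pVCpu, VMX_VMCS16_GUEST_FIELD_CS,
 *                         VMX_VMCS32_GUEST_CS_LIMIT, VMX_VMCS_GUEST_CS_BASE,
 *                         VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS, &pMixedCtx->cs);
 */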
5613
5614/**
5615 * Saves the guest segment registers from the current VMCS into the guest-CPU
5616 * context.
5617 *
5618 * @returns VBox status code.
5619 * @param pVCpu Pointer to the VMCPU.
5620 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5621 * out-of-sync. Make sure to update the required fields
5622 * before using them.
5623 *
5624 * @remarks No-long-jump zone!!!
5625 */
5626static int hmR0VmxSaveGuestSegmentRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5627{
5628 /* Guest segment registers. */
5629 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_SEGMENT_REGS))
5630 {
5631 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); AssertRCReturn(rc, rc);
5632 rc = VMXLOCAL_READ_SEG(CS, cs); AssertRCReturn(rc, rc);
5633 rc = VMXLOCAL_READ_SEG(SS, ss); AssertRCReturn(rc, rc);
5634 rc = VMXLOCAL_READ_SEG(DS, ds); AssertRCReturn(rc, rc);
5635 rc = VMXLOCAL_READ_SEG(ES, es); AssertRCReturn(rc, rc);
5636 rc = VMXLOCAL_READ_SEG(FS, fs); AssertRCReturn(rc, rc);
5637 rc = VMXLOCAL_READ_SEG(GS, gs); AssertRCReturn(rc, rc);
5638
5639 /* Restore segment attributes for real-on-v86 mode hack. */
5640 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5641 {
5642 pMixedCtx->cs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrCS.u;
5643 pMixedCtx->ss.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrSS.u;
5644 pMixedCtx->ds.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrDS.u;
5645 pMixedCtx->es.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrES.u;
5646 pMixedCtx->fs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrFS.u;
5647 pMixedCtx->gs.Attr.u = pVCpu->hm.s.vmx.RealMode.AttrGS.u;
5648 }
5649 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_SEGMENT_REGS;
5650 }
5651
5652 return VINF_SUCCESS;
5653}
5654
5655
5656/**
5657 * Saves the guest descriptor table registers and task register from the current
5658 * VMCS into the guest-CPU context.
5659 *
5660 * @returns VBox status code.
5661 * @param pVCpu Pointer to the VMCPU.
5662 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5663 * out-of-sync. Make sure to update the required fields
5664 * before using them.
5665 *
5666 * @remarks No-long-jump zone!!!
5667 */
5668static int hmR0VmxSaveGuestTableRegs(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5669{
5670 int rc = VINF_SUCCESS;
5671
5672 /* Guest LDTR. */
5673 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_LDTR))
5674 {
5675 rc = VMXLOCAL_READ_SEG(LDTR, ldtr);
5676 AssertRCReturn(rc, rc);
5677 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_LDTR;
5678 }
5679
5680 /* Guest GDTR. */
5681 uint64_t u64Val = 0;
5682 uint32_t u32Val = 0;
5683 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_GDTR))
5684 {
5685 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_GDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
5686 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5687 pMixedCtx->gdtr.pGdt = u64Val;
5688 pMixedCtx->gdtr.cbGdt = u32Val;
5689 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_GDTR;
5690 }
5691
5692 /* Guest IDTR. */
5693 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_IDTR))
5694 {
5695 rc = VMXReadVmcsGstN(VMX_VMCS_GUEST_IDTR_BASE, &u64Val); AssertRCReturn(rc, rc);
5696 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val); AssertRCReturn(rc, rc);
5697 pMixedCtx->idtr.pIdt = u64Val;
5698 pMixedCtx->idtr.cbIdt = u32Val;
5699 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_IDTR;
5700 }
5701
5702 /* Guest TR. */
5703 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_TR))
5704 {
5705 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
5706 AssertRCReturn(rc, rc);
5707
5708        /* For real-mode emulation using virtual-8086 mode we have the fake TSS (pRealModeTSS) in TR; don't save the fake one. */
5709 if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
5710 {
5711 rc = VMXLOCAL_READ_SEG(TR, tr);
5712 AssertRCReturn(rc, rc);
5713 }
5714 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_TR;
5715 }
5716 return rc;
5717}
5718
5719#undef VMXLOCAL_READ_SEG
5720
5721
5722/**
5723 * Saves the guest debug-register DR7 from the current VMCS into the guest-CPU
5724 * context.
5725 *
5726 * @returns VBox status code.
5727 * @param pVCpu Pointer to the VMCPU.
5728 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5729 * out-of-sync. Make sure to update the required fields
5730 * before using them.
5731 *
5732 * @remarks No-long-jump zone!!!
5733 */
5734static int hmR0VmxSaveGuestDR7(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5735{
5736 if (!(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_DEBUG))
5737 {
5738 if (!CPUMIsHyperDebugStateActive(pVCpu))
5739 {
5740 /* Upper 32-bits are always zero. See Intel spec. 2.7.3 "Loading and Storing Debug Registers". */
5741 uint32_t u32Val;
5742 int rc = VMXReadVmcs32(VMX_VMCS_GUEST_DR7, &u32Val); AssertRCReturn(rc, rc);
5743 pMixedCtx->dr[7] = u32Val;
5744 }
5745
5746 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_DEBUG;
5747 }
5748 return VINF_SUCCESS;
5749}
5750
5751
5752/**
5753 * Saves the guest APIC state from the current VMCS into the guest-CPU context.
5754 *
5755 * @returns VBox status code.
5756 * @param pVCpu Pointer to the VMCPU.
5757 * @param   pMixedCtx   Pointer to the guest-CPU context. The data may be
5758 * out-of-sync. Make sure to update the required fields
5759 * before using them.
5760 *
5761 * @remarks No-long-jump zone!!!
5762 */
5763static int hmR0VmxSaveGuestApicState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5764{
5765 /* Updating TPR is already done in hmR0VmxPostRunGuest(). Just update the flag. */
5766 pVCpu->hm.s.vmx.fUpdatedGuestState |= HMVMX_UPDATED_GUEST_APIC_STATE;
5767 return VINF_SUCCESS;
5768}
5769
5770
5771/**
5772 * Saves the entire guest state from the currently active VMCS into the
5773 * guest-CPU context. This essentially VMREADs all guest-data.
5774 *
5775 * @returns VBox status code.
5776 * @param pVCpu Pointer to the VMCPU.
5777 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5778 * out-of-sync. Make sure to update the required fields
5779 * before using them.
5780 */
5781static int hmR0VmxSaveGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5782{
5783 Assert(pVCpu);
5784 Assert(pMixedCtx);
5785
5786 if (pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL)
5787 return VINF_SUCCESS;
5788
5789 /* Though we can longjmp to ring-3 due to log-flushes here and get recalled again on the ring-3 callback path,
5790 there is no real need to do so. */
5791 if (VMMRZCallRing3IsEnabled(pVCpu))
5792 VMMR0LogFlushDisable(pVCpu);
5793 else
5794 Assert(VMMR0IsLogFlushDisabled(pVCpu));
5795 Log4Func(("vcpu[%RU32]\n", pVCpu->idCpu));
5796
5797 int rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
5798 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestRipRspRflags failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5799
5800 rc = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
5801 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestControlRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5802
5803 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
5804 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSegmentRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5805
5806 rc = hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
5807 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestTableRegs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5808
5809 rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
5810 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestDR7 failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5811
5812 rc = hmR0VmxSaveGuestSysenterMsrs(pVCpu, pMixedCtx);
5813 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestSysenterMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5814
5815 rc = hmR0VmxSaveGuestFSBaseMsr(pVCpu, pMixedCtx);
5816 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestFSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5817
5818 rc = hmR0VmxSaveGuestGSBaseMsr(pVCpu, pMixedCtx);
5819 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestGSBaseMsr failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5820
5821 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
5822 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestAutoLoadStoreMsrs failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5823
5824 rc = hmR0VmxSaveGuestActivityState(pVCpu, pMixedCtx);
5825 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestActivityState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5826
5827 rc = hmR0VmxSaveGuestApicState(pVCpu, pMixedCtx);
5828 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveGuestApicState failed! rc=%Rrc (pVCpu=%p)\n", rc, pVCpu), rc);
5829
5830 AssertMsg(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL,
5831 ("Missed guest state bits while saving state; residue %RX32\n", pVCpu->hm.s.vmx.fUpdatedGuestState));
5832
5833 if (VMMRZCallRing3IsEnabled(pVCpu))
5834 VMMR0LogFlushEnable(pVCpu);
5835
5836 return rc;
5837}
5838
5839
5840/**
5841 * Check per-VM and per-VCPU force flag actions that require us to go back to
5842 * ring-3 for one reason or another.
5843 *
5844 * @returns VBox status code (informational status codes included).
5845 * @retval VINF_SUCCESS if we don't have any actions that require going back to
5846 * ring-3.
5847 * @retval VINF_PGM_SYNC_CR3 if we have pending PGM CR3 sync.
5848 * @retval VINF_EM_PENDING_REQUEST if we have pending requests (like hardware
5849 * interrupts)
5850 * @retval VINF_PGM_POOL_FLUSH_PENDING if PGM is doing a pool flush and requires
5851 * all EMTs to be in ring-3.
5852 * @retval VINF_EM_RAW_TO_R3 if there are pending DMA requests.
5853 * @retval VINF_EM_NO_MEMORY if PGM is out of memory; we need to return
5854 * to the EM loop.
5855 *
5856 * @param pVM Pointer to the VM.
5857 * @param pVCpu Pointer to the VMCPU.
5858 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
5859 * out-of-sync. Make sure to update the required fields
5860 * before using them.
5861 */
5862static int hmR0VmxCheckForceFlags(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
5863{
5864 Assert(VMMRZCallRing3IsEnabled(pVCpu));
5865
5866 if ( VM_FF_IS_PENDING(pVM, !pVCpu->hm.s.fSingleInstruction
5867 ? VM_FF_HP_R0_PRE_HM_MASK : VM_FF_HP_R0_PRE_HM_STEP_MASK)
5868 || VMCPU_FF_IS_PENDING(pVCpu, !pVCpu->hm.s.fSingleInstruction
5869 ? VMCPU_FF_HP_R0_PRE_HM_MASK : VMCPU_FF_HP_R0_PRE_HM_STEP_MASK) )
5870 {
5871 /* We need the control registers now; make sure the guest-CPU context is updated. */
5872 int rc3 = hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
5873 AssertRCReturn(rc3, rc3);
5874
5875 /* Pending HM CR3 sync. */
5876 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3))
5877 {
5878 int rc2 = PGMUpdateCR3(pVCpu, pMixedCtx->cr3);
5879 AssertMsgReturn(rc2 == VINF_SUCCESS || rc2 == VINF_PGM_SYNC_CR3,
5880 ("%Rrc\n", rc2), RT_FAILURE_NP(rc2) ? rc2 : VERR_IPE_UNEXPECTED_INFO_STATUS);
5881 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_CR3));
5882 }
5883
5884 /* Pending HM PAE PDPEs. */
5885 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES))
5886 {
5887 PGMGstUpdatePaePdpes(pVCpu, &pVCpu->hm.s.aPdpes[0]);
5888 Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_UPDATE_PAE_PDPES));
5889 }
5890
5891 /* Pending PGM CR3 sync. */
5892 if (VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_PGM_SYNC_CR3 | VMCPU_FF_PGM_SYNC_CR3_NON_GLOBAL))
5893 {
5894 int rc2 = PGMSyncCR3(pVCpu, pMixedCtx->cr0, pMixedCtx->cr3, pMixedCtx->cr4, VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_PGM_SYNC_CR3));
5895 if (rc2 != VINF_SUCCESS)
5896 {
5897 AssertRC(rc2);
5898 Log4(("hmR0VmxCheckForceFlags: PGMSyncCR3 forcing us back to ring-3. rc2=%d\n", rc2));
5899 return rc2;
5900 }
5901 }
5902
5903 /* Pending HM-to-R3 operations (critsects, timers, EMT rendezvous etc.) */
5904 /* -XXX- what was that about single stepping? */
5905 if ( VM_FF_IS_PENDING(pVM, VM_FF_HM_TO_R3_MASK)
5906 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
5907 {
5908 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
5909 int rc2 = RT_UNLIKELY(VM_FF_IS_PENDING(pVM, VM_FF_PGM_NO_MEMORY)) ? VINF_EM_NO_MEMORY : VINF_EM_RAW_TO_R3;
5910 Log4(("hmR0VmxCheckForceFlags: HM_TO_R3 forcing us back to ring-3. rc=%d\n", rc2));
5911 return rc2;
5912 }
5913
5914 /* Pending VM request packets, such as hardware interrupts. */
5915 if ( VM_FF_IS_PENDING(pVM, VM_FF_REQUEST)
5916 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_REQUEST))
5917 {
5918 Log4(("hmR0VmxCheckForceFlags: Pending VM request forcing us back to ring-3\n"));
5919 return VINF_EM_PENDING_REQUEST;
5920 }
5921
5922 /* Pending PGM pool flushes. */
5923 if (VM_FF_IS_PENDING(pVM, VM_FF_PGM_POOL_FLUSH_PENDING))
5924 {
5925 Log4(("hmR0VmxCheckForceFlags: PGM pool flush pending forcing us back to ring-3\n"));
5926 return VINF_PGM_POOL_FLUSH_PENDING;
5927 }
5928
5929 /* Pending DMA requests. */
5930 if (VM_FF_IS_PENDING(pVM, VM_FF_PDM_DMA))
5931 {
5932 Log4(("hmR0VmxCheckForceFlags: Pending DMA request forcing us back to ring-3\n"));
5933 return VINF_EM_RAW_TO_R3;
5934 }
5935 }
5936
5937 /* Paranoia. */
5938 return VINF_SUCCESS;
5939}
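/*
 * Illustrative sketch (not part of the original source): how a caller in the
 * inner execution loop would typically consume hmR0VmxCheckForceFlags() based
 * on the return values documented above. Anything other than VINF_SUCCESS
 * means we must bail out to ring-3.
 */
#if 0 /* Example only. */
    int rcFF = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
    if (rcFF != VINF_SUCCESS)
        return rcFF;    /* E.g. VINF_EM_RAW_TO_R3, VINF_PGM_SYNC_CR3, VINF_EM_NO_MEMORY, ... */
#endif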
5940
5941
5942/**
5943 * Converts any TRPM trap into a pending HM event. This is typically used when
5944 * entering from ring-3 (not longjmp returns).
5945 *
5946 * @param pVCpu Pointer to the VMCPU.
5947 */
5948static void hmR0VmxTrpmTrapToPendingEvent(PVMCPU pVCpu)
5949{
5950 Assert(TRPMHasTrap(pVCpu));
5951 Assert(!pVCpu->hm.s.Event.fPending);
5952
5953 uint8_t uVector;
5954 TRPMEVENT enmTrpmEvent;
5955 RTGCUINT uErrCode;
5956 RTGCUINTPTR GCPtrFaultAddress;
5957 uint8_t cbInstr;
5958
5959 int rc = TRPMQueryTrapAll(pVCpu, &uVector, &enmTrpmEvent, &uErrCode, &GCPtrFaultAddress, &cbInstr);
5960 AssertRC(rc);
5961
5962 /* See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" for the format of u32IntrInfo. */
5963 uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
5964 if (enmTrpmEvent == TRPM_TRAP)
5965 {
5966 switch (uVector)
5967 {
5968 case X86_XCPT_BP:
5969 case X86_XCPT_OF:
5970 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5971 break;
5972
5973 case X86_XCPT_PF:
5974 case X86_XCPT_DF:
5975 case X86_XCPT_TS:
5976 case X86_XCPT_NP:
5977 case X86_XCPT_SS:
5978 case X86_XCPT_GP:
5979 case X86_XCPT_AC:
5980 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
5981 /* no break! */
5982 default:
5983 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5984 break;
5985 }
5986 }
5987 else if (enmTrpmEvent == TRPM_HARDWARE_INT)
5988 {
5989 if (uVector == X86_XCPT_NMI)
5990 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5991 else
5992 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5993 }
5994 else if (enmTrpmEvent == TRPM_SOFTWARE_INT)
5995 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
5996 else
5997 AssertMsgFailed(("Invalid TRPM event type %d\n", enmTrpmEvent));
5998
5999 rc = TRPMResetTrap(pVCpu);
6000 AssertRC(rc);
6001 Log4(("TRPM->HM event: u32IntrInfo=%#RX32 enmTrpmEvent=%d cbInstr=%u uErrCode=%#RX32 GCPtrFaultAddress=%#RGv\n",
6002 u32IntrInfo, enmTrpmEvent, cbInstr, uErrCode, GCPtrFaultAddress));
6003
6004 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, uErrCode, GCPtrFaultAddress);
6005 STAM_COUNTER_DEC(&pVCpu->hm.s.StatInjectPendingReflect);
6006}
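/*
 * Illustrative helper (hypothetical, not part of the original source) showing
 * how the interruption-information word built above is composed, per Intel
 * spec. 24.8.3 "VM-Entry Controls for Event Injection": bits 7:0 hold the
 * vector, bits 10:8 the event type, bit 11 the error-code-valid flag and
 * bit 31 the valid flag.
 */
#if 0 /* Example only. */
DECLINLINE(uint32_t) hmR0VmxExampleMakeIntrInfo(uint8_t uVector, uint32_t uType, bool fErrCodeValid)
{
    uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;      /* Vector + valid bit. */
    u32IntrInfo |= (uType << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);        /* Event type. */
    if (fErrCodeValid)
        u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;         /* Error-code-valid flag. */
    return u32IntrInfo;
}
#endif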
6007
6008
6009/**
6010 * Converts any pending HM event into a TRPM trap. Typically used when leaving
6011 * VT-x to execute any instruction.
6012 *
6013 * @param pVCpu Pointer to the VMCPU.
6014 */
6015static void hmR0VmxPendingEventToTrpmTrap(PVMCPU pVCpu)
6016{
6017 Assert(pVCpu->hm.s.Event.fPending);
6018
6019 uint32_t uVectorType = VMX_IDT_VECTORING_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
6020 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVCpu->hm.s.Event.u64IntrInfo);
6021 bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVCpu->hm.s.Event.u64IntrInfo);
6022 uint32_t uErrorCode = pVCpu->hm.s.Event.u32ErrCode;
6023
6024 /* If a trap was already pending, we did something wrong! */
6025 Assert(TRPMQueryTrap(pVCpu, NULL /* pu8TrapNo */, NULL /* pEnmType */) == VERR_TRPM_NO_ACTIVE_TRAP);
6026
6027 TRPMEVENT enmTrapType;
6028 switch (uVectorType)
6029 {
6030 case VMX_IDT_VECTORING_INFO_TYPE_EXT_INT:
6031 case VMX_IDT_VECTORING_INFO_TYPE_NMI:
6032 enmTrapType = TRPM_HARDWARE_INT;
6033 break;
6034
6035 case VMX_IDT_VECTORING_INFO_TYPE_SW_INT:
6036 enmTrapType = TRPM_SOFTWARE_INT;
6037 break;
6038
6039 case VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT:
6040 case VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT: /* #BP and #OF */
6041 case VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT:
6042 enmTrapType = TRPM_TRAP;
6043 break;
6044
6045 default:
6046 AssertMsgFailed(("Invalid trap type %#x\n", uVectorType));
6047 enmTrapType = TRPM_32BIT_HACK;
6048 break;
6049 }
6050
6051 Log4(("HM event->TRPM: uVector=%#x enmTrapType=%d\n", uVector, enmTrapType));
6052
6053 int rc = TRPMAssertTrap(pVCpu, uVector, enmTrapType);
6054 AssertRC(rc);
6055
6056 if (fErrorCodeValid)
6057 TRPMSetErrorCode(pVCpu, uErrorCode);
6058
6059 if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
6060 && uVector == X86_XCPT_PF)
6061 {
6062 TRPMSetFaultAddress(pVCpu, pVCpu->hm.s.Event.GCPtrFaultAddress);
6063 }
6064 else if ( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6065 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
6066 || uVectorType == VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
6067 {
6068 AssertMsg( uVectorType == VMX_IDT_VECTORING_INFO_TYPE_SW_INT
6069 || (uVector == X86_XCPT_BP || uVector == X86_XCPT_OF),
6070 ("Invalid vector: uVector=%#x uVectorType=%#x\n", uVector, uVectorType));
6071 TRPMSetInstrLength(pVCpu, pVCpu->hm.s.Event.cbInstr);
6072 }
6073 pVCpu->hm.s.Event.fPending = false;
6074}
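/*
 * Illustrative sketch (hypothetical, not part of the original source) of the
 * round-trip between the two conversion functions above: TRPM -> HM when
 * (re-)entering from ring-3, HM -> TRPM when leaving VT-x so that IEM/REM can
 * pick the event up.
 */
#if 0 /* Example only. */
    if (TRPMHasTrap(pVCpu))                 /* On the way into VT-x. */
        hmR0VmxTrpmTrapToPendingEvent(pVCpu);
    /* ... attempt guest execution ... */
    if (pVCpu->hm.s.Event.fPending)         /* On the way out to ring-3. */
        hmR0VmxPendingEventToTrpmTrap(pVCpu);
#endif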
6075
6076
6077/**
6078 * Does the necessary state syncing before returning to ring-3 for any reason
6079 * (longjmp, preemption, voluntary exits to ring-3) from VT-x.
6080 *
6081 * @param pVM Pointer to the VM.
6082 * @param pVCpu Pointer to the VMCPU.
6083 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6084 * out-of-sync. Make sure to update the required fields
6085 * before using them.
6086 *
6087 * @remarks No-long-jmp zone!!!
6088 */
6089static void hmR0VmxLeave(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6090{
6091 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6092 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6093
6094 RTCPUID idCpu = RTMpCpuId();
6095 Log4Func(("HostCpuId=%u\n", idCpu));
6096
6097 /* Save the guest state if necessary. */
6098 if (pVCpu->hm.s.vmx.fUpdatedGuestState != HMVMX_UPDATED_GUEST_ALL)
6099 {
6100 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
6101 AssertRC(rc);
6102 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL);
6103 }
6104
6105 /* Restore host FPU state if necessary and resync on next R0 reentry. */
6106 if (CPUMIsGuestFPUStateActive(pVCpu))
6107 {
6108 CPUMR0SaveGuestFPU(pVM, pVCpu, pMixedCtx);
6109 Assert(!CPUMIsGuestFPUStateActive(pVCpu));
6110 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
6111 }
6112
6113 /* Restore host debug registers if necessary and resync on next R0 reentry. */
6114#ifdef VBOX_STRICT
6115 if (CPUMIsHyperDebugStateActive(pVCpu))
6116 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT);
6117#endif
6118 if (CPUMR0DebugStateMaybeSaveGuestAndRestoreHost(pVCpu, true /* save DR6 */))
6119 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
6120 Assert(!CPUMIsGuestDebugStateActive(pVCpu));
6121 Assert(!CPUMIsHyperDebugStateActive(pVCpu));
6122
6123 /* Restore host-state bits that VT-x only restores partially. */
6124 if ( (pVCpu->hm.s.vmx.fRestoreHostFlags & VMX_RESTORE_HOST_REQUIRED)
6125 && (pVCpu->hm.s.vmx.fRestoreHostFlags & ~VMX_RESTORE_HOST_REQUIRED))
6126 {
6127 Log4Func(("Restoring Host State: fRestoreHostFlags=%#RX32 HostCpuId=%u\n", pVCpu->hm.s.vmx.fRestoreHostFlags, idCpu));
6128 VMXRestoreHostState(pVCpu->hm.s.vmx.fRestoreHostFlags, &pVCpu->hm.s.vmx.RestoreHost);
6129 pVCpu->hm.s.vmx.fRestoreHostFlags = 0;
6130 }
6131
6132 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatEntry);
6133 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatLoadGuestState);
6134 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit1);
6135 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExit2);
6136 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitIO);
6137 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitMovCRx);
6138 STAM_PROFILE_ADV_SET_STOPPED(&pVCpu->hm.s.StatExitXcptNmi);
6139 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
6140
6141 VMCPU_CMPXCHG_STATE(pVCpu, VMCPUSTATE_STARTED_HM, VMCPUSTATE_STARTED_EXEC);
6142
6143 /** @todo This kinda defeats the purpose of having preemption hooks.
6144 * The problem is that deregistering the hooks should be moved to a place
6145 * that lasts until the EMT is about to be destroyed, not done every time we
6146 * leave HM context.
6147 */
6148 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_ACTIVE)
6149 {
6150 int rc = VMXClearVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
6151 AssertRC(rc);
6152 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_CLEAR;
6153 Log4Func(("Cleared Vmcs. HostCpuId=%u\n", idCpu));
6154 }
6155 Assert(!(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_LAUNCHED));
6156 NOREF(idCpu);
6157}
6158
6159
6160/**
6161 * Leaves the VT-x session.
6162 *
6163 * @param pVM Pointer to the VM.
6164 * @param pVCpu Pointer to the VMCPU.
6165 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6166 * out-of-sync. Make sure to update the required fields
6167 * before using them.
6168 *
6169 * @remarks No-long-jmp zone!!!
6170 */
6171DECLINLINE(void) hmR0VmxLeaveSession(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6172{
6173 HM_DISABLE_PREEMPT_IF_NEEDED();
6174 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
6175 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6176
6177 /* When thread-context hooks are used, we can avoid doing the leave again if we were preempted earlier
6178 and already did this from VMXR0ThreadCtxCallback(). */
6179 if (!pVCpu->hm.s.fLeaveDone)
6180 {
6181 hmR0VmxLeave(pVM, pVCpu, pMixedCtx);
6182 pVCpu->hm.s.fLeaveDone = true;
6183 }
6184
6185 /* Deregister hook now that we've left HM context before re-enabling preemption. */
6186 /** @todo This is bad. Deregistering here means we need to VMCLEAR always
6187 * (longjmp/exit-to-r3) in VT-x which is not efficient. */
6188 if (VMMR0ThreadCtxHooksAreRegistered(pVCpu))
6189 VMMR0ThreadCtxHooksDeregister(pVCpu);
6190
6191 /* Leave HM context. This takes care of local init (term). */
6192 int rc = HMR0LeaveCpu(pVCpu);
6193 AssertRC(rc); NOREF(rc);
6194
6195 HM_RESTORE_PREEMPT_IF_NEEDED();
6196}
6197
6198
6199/**
6200 * Does the necessary state syncing before doing a longjmp to ring-3.
6201 *
6202 * @param pVM Pointer to the VM.
6203 * @param pVCpu Pointer to the VMCPU.
6204 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6205 * out-of-sync. Make sure to update the required fields
6206 * before using them.
6207 *
6208 * @remarks No-long-jmp zone!!!
6209 */
6210DECLINLINE(void) hmR0VmxLongJmpToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6211{
6212 hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
6213}
6214
6215
6216/**
6217 * Take necessary actions before going back to ring-3.
6218 *
6219 * An action requires us to go back to ring-3. This function does the necessary
6220 * steps before we can safely return to ring-3. This is not the same as a
6221 * longjmp to ring-3; it is voluntary and prepares the guest so it may continue
6222 * executing outside HM (recompiler/IEM).
6223 *
6224 * @param pVM Pointer to the VM.
6225 * @param pVCpu Pointer to the VMCPU.
6226 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6227 * out-of-sync. Make sure to update the required fields
6228 * before using them.
6229 * @param rcExit The reason for exiting to ring-3. Can be
6230 * VINF_VMM_UNKNOWN_RING3_CALL.
6231 */
6232static void hmR0VmxExitToRing3(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, int rcExit)
6233{
6234 Assert(pVM);
6235 Assert(pVCpu);
6236 Assert(pMixedCtx);
6237 HMVMX_ASSERT_PREEMPT_SAFE();
6238
6239 if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_GUEST_STATE))
6240 {
6241 /* We've done what is required in hmR0VmxExitErrInvalidGuestState(). We're not going to continue guest execution... */
6242 return;
6243 }
6244 else if (RT_UNLIKELY(rcExit == VERR_VMX_INVALID_VMCS_PTR))
6245 {
6246 VMXGetActivatedVmcs(&pVCpu->hm.s.vmx.LastError.u64VMCSPhys);
6247 pVCpu->hm.s.vmx.LastError.u32VMCSRevision = *(uint32_t *)pVCpu->hm.s.vmx.pvVmcs;
6248 pVCpu->hm.s.vmx.LastError.idEnteredCpu = pVCpu->hm.s.idEnteredCpu;
6249 /* LastError.idCurrentCpu was updated in hmR0VmxPreRunGuestCommitted(). */
6250 return;
6251 }
6252
6253 /* Please, no longjumps here (any logging shouldn't flush jump back to ring-3). NO LOGGING BEFORE THIS POINT! */
6254 VMMRZCallRing3Disable(pVCpu);
6255 Log4(("hmR0VmxExitToRing3: pVCpu=%p idCpu=%RU32 rcExit=%d\n", pVCpu, pVCpu->idCpu, rcExit));
6256
6257 /* We need to do this only while truly exiting the "inner loop" back to ring-3 and -not- for any longjmp to ring-3. */
6258 if (pVCpu->hm.s.Event.fPending)
6259 {
6260 hmR0VmxPendingEventToTrpmTrap(pVCpu);
6261 Assert(!pVCpu->hm.s.Event.fPending);
6262 }
6263
6264 /* Save guest state and restore host state bits. */
6265 hmR0VmxLeaveSession(pVM, pVCpu, pMixedCtx);
6266 STAM_COUNTER_DEC(&pVCpu->hm.s.StatSwitchLongJmpToR3);
6267
6268 /* Sync recompiler state. */
6269 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_TO_R3);
6270 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_SYSENTER_MSR
6271 | CPUM_CHANGED_LDTR
6272 | CPUM_CHANGED_GDTR
6273 | CPUM_CHANGED_IDTR
6274 | CPUM_CHANGED_TR
6275 | CPUM_CHANGED_HIDDEN_SEL_REGS);
6276 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_CR0);
6277 if ( pVM->hm.s.fNestedPaging
6278 && CPUMIsGuestPagingEnabledEx(pMixedCtx))
6279 {
6280 CPUMSetChangedFlags(pVCpu, CPUM_CHANGED_GLOBAL_TLB_FLUSH);
6281 }
6282
6283 /*
6284 * Clear X86_EFL_TF if necessary.
6285 */
6286 if (pVCpu->hm.s.fClearTrapFlag)
6287 {
6288 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);
6289 pMixedCtx->eflags.Bits.u1TF = 0;
6290 pVCpu->hm.s.fClearTrapFlag = false;
6291 }
6292 /** @todo There seem to be issues with the resume flag when the monitor trap
6293 * flag is pending without being used. Seen early in BIOS init when
6294 * accessing the APIC page in protected mode. */
6295
6296 /* On our way back from ring-3 the following needs to be done. */
6297 if (rcExit == VINF_EM_RAW_INTERRUPT)
6298 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT;
6299 else
6300 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_HOST_CONTEXT | HM_CHANGED_ALL_GUEST;
6301
6302 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchExitToR3);
6303 VMMRZCallRing3Enable(pVCpu);
6304}
6305
6306
6307/**
6308 * VMMRZCallRing3() callback wrapper which saves the guest state before we
6309 * longjump to ring-3 and possibly get preempted.
6310 *
6311 * @param pVCpu Pointer to the VMCPU.
6312 * @param enmOperation The operation causing the ring-3 longjump.
6313 * @param pvUser The user argument (pointer to the possibly
6314 * out-of-date guest-CPU context).
6315 *
6316 * @remarks Must never be called with @a enmOperation ==
6317 * VMMCALLRING3_VM_R0_ASSERTION.
6318 */
6319DECLCALLBACK(void) hmR0VmxCallRing3Callback(PVMCPU pVCpu, VMMCALLRING3 enmOperation, void *pvUser)
6320{
6321 /* VMMRZCallRing3() already makes sure we never get called as a result of a longjmp due to an assertion. */
6322 Assert(pVCpu);
6323 Assert(pvUser);
6324 Assert(VMMRZCallRing3IsEnabled(pVCpu));
6325 HMVMX_ASSERT_PREEMPT_SAFE();
6326
6327 VMMRZCallRing3Disable(pVCpu);
6328 Assert(VMMR0IsLogFlushDisabled(pVCpu));
6329
6330 Log4(("hmR0VmxCallRing3Callback->hmR0VmxLongJmpToRing3 pVCpu=%p idCpu=%RU32\n", pVCpu, pVCpu->idCpu));
6331 hmR0VmxLongJmpToRing3(pVCpu->CTX_SUFF(pVM), pVCpu, (PCPUMCTX)pvUser);
6332
6333 VMMRZCallRing3Enable(pVCpu);
6334}
6335
6336
6337/**
6338 * Sets the interrupt-window exiting control in the VMCS which instructs VT-x to
6339 * cause a VM-exit as soon as the guest is in a state to receive interrupts.
6340 *
6341 * @param pVCpu Pointer to the VMCPU.
6342 */
6343DECLINLINE(void) hmR0VmxSetIntWindowExitVmcs(PVMCPU pVCpu)
6344{
6345 if (RT_LIKELY(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.msr.vmx_proc_ctls.n.allowed1 & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
6346 {
6347 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT))
6348 {
6349 pVCpu->hm.s.vmx.u32ProcCtls |= VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
6350 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
6351 AssertRC(rc);
6352 }
6353 } /* else we will deliver interrupts whenever the guest exits next and is in a state to receive events. */
6354}
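/*
 * Illustrative counterpart (a sketch, assuming the usual VMX handling; not
 * part of the original source): once the interrupt-window VM-exit fires, the
 * exit handler is expected to clear the control again so we don't keep
 * exiting, e.g.:
 */
#if 0 /* Example only. */
    pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;  /* Disarm the exit. */
    int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
    AssertRC(rc);
#endif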
6355
6356
6357/**
6358 * Evaluates the event to be delivered to the guest and sets it as the pending
6359 * event.
6360 *
6361 * @param pVCpu Pointer to the VMCPU.
6362 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6363 * out-of-sync. Make sure to update the required fields
6364 * before using them.
6365 */
6366static void hmR0VmxEvaluatePendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6367{
6368 Assert(!pVCpu->hm.s.Event.fPending);
6369
6370 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
6371 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
6372 bool fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
6373 bool fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
6374
6375 Assert(!fBlockSti || (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS));
6376 Assert( !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) /* We don't support block-by-NMI and SMI yet.*/
6377 && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
6378 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
6379 Assert(!TRPMHasTrap(pVCpu));
6380
6381 /** @todo SMI. SMIs take priority over NMIs. */
6382 if (VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INTERRUPT_NMI)) /* NMI. NMIs take priority over regular interrupts. */
6383 {
6384 /* On some CPUs block-by-STI also blocks NMIs. See Intel spec. 26.3.1.5 "Checks On Guest Non-Register State". */
6385 if ( !fBlockMovSS
6386 && !fBlockSti)
6387 {
6389 Log4(("Pending NMI vcpu[%RU32]\n", pVCpu->idCpu));
6390 uint32_t u32IntrInfo = X86_XCPT_NMI | VMX_EXIT_INTERRUPTION_INFO_VALID;
6391 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6392
6393 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6394 VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_INTERRUPT_NMI);
6395 }
6396 else
6397 hmR0VmxSetIntWindowExitVmcs(pVCpu);
6398 }
6399 else if ( VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC))
6400 && !pVCpu->hm.s.fSingleInstruction)
6401 {
6402 /*
6403 * Check if the guest can receive external interrupts (PIC/APIC). Once we do PDMGetInterrupt() we -must- deliver
6404 * the interrupt ASAP. We must not execute any guest code until we inject the interrupt, which is why the
6405 * interrupt is evaluated here rather than merely being set as pending based on the force-flags.
6406 */
6407 int rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6408 AssertRC(rc);
6409 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
6410 if ( !fBlockInt
6411 && !fBlockSti
6412 && !fBlockMovSS)
6413 {
6414 uint8_t u8Interrupt;
6415 rc = PDMGetInterrupt(pVCpu, &u8Interrupt);
6416 if (RT_SUCCESS(rc))
6417 {
6418 Log4(("Pending interrupt vcpu[%RU32] u8Interrupt=%#x \n", pVCpu->idCpu, u8Interrupt));
6419 uint32_t u32IntrInfo = u8Interrupt | VMX_EXIT_INTERRUPTION_INFO_VALID;
6420 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6421
6422 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6423 }
6424 else
6425 {
6426 /** @todo Does this actually happen? If not turn it into an assertion. */
6427 Assert(!VMCPU_FF_IS_PENDING(pVCpu, (VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC)));
6428 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchGuestIrq);
6429 }
6430 }
6431 else
6432 hmR0VmxSetIntWindowExitVmcs(pVCpu);
6433 }
6434}
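/*
 * Summary of the evaluation order implemented above (descriptive note, not in
 * the original source): SMIs would take priority over NMIs (still a todo),
 * NMIs take priority over PIC/APIC interrupts, and an event that cannot be
 * delivered yet (block-by-STI/MOV SS, IF clear) arms the interrupt-window
 * VM-exit instead of being injected.
 */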
6435
6436
6437/**
6438 * Injects any pending events into the guest if the guest is in a state to
6439 * receive them.
6440 *
6441 * @returns VBox status code (informational status codes included).
6442 * @param pVCpu Pointer to the VMCPU.
6443 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6444 * out-of-sync. Make sure to update the required fields
6445 * before using them.
6446 *
6447 * @remarks No-long-jump zone!!!
6448 */
6449static int hmR0VmxInjectPendingEvent(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6450{
6451 /* Get the current interruptibility-state of the guest and then figure out what can be injected. */
6452 uint32_t uIntrState = hmR0VmxGetGuestIntrState(pVCpu, pMixedCtx);
6453 bool fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
6454 bool fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
6455
6456 Assert(!fBlockSti || (pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS));
6457 Assert( !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI) /* We don't support block-by-NMI and SMI yet.*/
6458 && !(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI));
6459 Assert(!fBlockSti || pMixedCtx->eflags.Bits.u1IF); /* Cannot set block-by-STI when interrupts are disabled. */
6460 Assert(!TRPMHasTrap(pVCpu));
6461
6462 int rc = VINF_SUCCESS;
6463 if (pVCpu->hm.s.Event.fPending)
6464 {
6465#if defined(VBOX_STRICT) || defined(VBOX_WITH_STATISTICS)
6466 uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVCpu->hm.s.Event.u64IntrInfo);
6467 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
6468 {
6469 rc = hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
6470 AssertRCReturn(rc, rc);
6471 const bool fBlockInt = !(pMixedCtx->eflags.u32 & X86_EFL_IF);
6472 Assert(!fBlockInt);
6473 Assert(!fBlockSti);
6474 Assert(!fBlockMovSS);
6475 }
6476 else if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
6477 {
6478 Assert(!fBlockSti);
6479 Assert(!fBlockMovSS);
6480 }
6481#endif
6482 Log4(("Injecting pending event vcpu[%RU32] u64IntrInfo=%#RX64\n", pVCpu->idCpu, pVCpu->hm.s.Event.u64IntrInfo));
6483 rc = hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, pVCpu->hm.s.Event.u64IntrInfo, pVCpu->hm.s.Event.cbInstr,
6484 pVCpu->hm.s.Event.u32ErrCode, pVCpu->hm.s.Event.GCPtrFaultAddress, &uIntrState);
6485 AssertRCReturn(rc, rc);
6486
6487 pVCpu->hm.s.Event.fPending = false;
6488
6489 /* Update the interruptibility-state as it could have been changed by
6490 hmR0VmxInjectEventVmcs() (e.g. real-on-v86 guest injecting software interrupts) */
6491 fBlockMovSS = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS);
6492 fBlockSti = !!(uIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI);
6493
6494#ifdef VBOX_WITH_STATISTICS
6495 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
6496 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectInterrupt);
6497 else
6498 STAM_COUNTER_INC(&pVCpu->hm.s.StatInjectXcpt);
6499#endif
6500 }
6501
6502 /* Deliver a pending debug exception if the guest is single-stepping. Evaluate and set the BS bit. */
6503 int rc2 = VINF_SUCCESS;
6504 if ( fBlockSti
6505 || fBlockMovSS)
6506 {
6507 if (!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu))
6508 {
6509 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RFLAGS);
6510 if (pMixedCtx->eflags.Bits.u1TF) /* We don't have any IA32_DEBUGCTL MSR for guests. Treat as all bits 0. */
6511 {
6512 /*
6513 * The pending-debug exceptions field is cleared on all VM-exits except VMX_EXIT_TPR_BELOW_THRESHOLD,
6514 * VMX_EXIT_MTF, VMX_EXIT_APIC_WRITE and VMX_EXIT_VIRTUALIZED_EOI.
6515 * See Intel spec. 27.3.4 "Saving Non-Register State".
6516 */
6517 rc2 = VMXWriteVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS);
6518 AssertRCReturn(rc2, rc2);
6519 }
6520 }
6521 else
6522 {
6523 /* We are single-stepping in the hypervisor debugger; clear interrupt inhibition, as setting the BS bit would mean
6524 delivering a #DB to the guest upon VM-entry when it shouldn't be. */
6525 uIntrState = 0;
6526 }
6527 }
6528
6529 /*
6530 * There's no need to clear the VM-entry interruption-information field here if we're not injecting anything.
6531 * VT-x clears the valid bit on every VM-exit. See Intel spec. 24.8.3 "VM-Entry Controls for Event Injection".
6532 */
6533 rc2 = hmR0VmxLoadGuestIntrState(pVCpu, uIntrState);
6534 AssertRC(rc2);
6535
6536 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
6537 return rc;
6538}
6539
6540
6541/**
6542 * Sets an invalid-opcode (#UD) exception as pending-for-injection into the VM.
6543 *
6544 * @param pVCpu Pointer to the VMCPU.
6545 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6546 * out-of-sync. Make sure to update the required fields
6547 * before using them.
6548 */
6549DECLINLINE(void) hmR0VmxSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6550{
6551 uint32_t u32IntrInfo = X86_XCPT_UD | VMX_EXIT_INTERRUPTION_INFO_VALID;
6552 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6553}
6554
6555
6556/**
6557 * Injects a double-fault (#DF) exception into the VM.
6558 *
6559 * @returns VBox status code (informational status code included).
6560 * @param pVCpu Pointer to the VMCPU.
6561 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6562 * out-of-sync. Make sure to update the required fields
6563 * before using them.
 * @param puIntrState Pointer to the current guest interruptibility-state.
6564 */
6565DECLINLINE(int) hmR0VmxInjectXcptDF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t *puIntrState)
6566{
6567 uint32_t u32IntrInfo = X86_XCPT_DF | VMX_EXIT_INTERRUPTION_INFO_VALID;
6568 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6569 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6570 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */,
6571 puIntrState);
6572}
6573
6574
6575/**
6576 * Sets a debug (#DB) exception as pending-for-injection into the VM.
6577 *
6578 * @param pVCpu Pointer to the VMCPU.
6579 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6580 * out-of-sync. Make sure to update the required fields
6581 * before using them.
6582 */
6583DECLINLINE(void) hmR0VmxSetPendingXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx)
6584{
6585 uint32_t u32IntrInfo = X86_XCPT_DB | VMX_EXIT_INTERRUPTION_INFO_VALID;
6586 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6587 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, 0 /* cbInstr */, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6588}
6589
6590
6591/**
6592 * Sets an overflow (#OF) exception as pending-for-injection into the VM.
6593 *
6594 * @param pVCpu Pointer to the VMCPU.
6595 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6596 * out-of-sync. Make sure to update the required fields
6597 * before using them.
6598 * @param cbInstr The instruction length in bytes (determines the
6599 * return address pushed on the guest stack).
6600 */
6601DECLINLINE(void) hmR0VmxSetPendingXcptOF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint32_t cbInstr)
6602{
6603 uint32_t u32IntrInfo = X86_XCPT_OF | VMX_EXIT_INTERRUPTION_INFO_VALID;
6604 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6605 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6606}
6607
6608
6609/**
6610 * Injects a general-protection (#GP) fault into the VM.
6611 *
6612 * @returns VBox status code (informational status code included).
6613 * @param pVCpu Pointer to the VMCPU.
6614 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6615 * out-of-sync. Make sure to update the required fields
6616 * before using them.
 * @param fErrorCodeValid Whether the error code for the #GP is valid.
6617 * @param u32ErrorCode The error code associated with the #GP.
 * @param puIntrState Pointer to the current guest interruptibility-state.
6618 */
6619DECLINLINE(int) hmR0VmxInjectXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, bool fErrorCodeValid, uint32_t u32ErrorCode,
6620 uint32_t *puIntrState)
6621{
6622 uint32_t u32IntrInfo = X86_XCPT_GP | VMX_EXIT_INTERRUPTION_INFO_VALID;
6623 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6624 if (fErrorCodeValid)
6625 u32IntrInfo |= VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6626 return hmR0VmxInjectEventVmcs(pVCpu, pMixedCtx, u32IntrInfo, 0 /* cbInstr */, u32ErrorCode, 0 /* GCPtrFaultAddress */,
6627 puIntrState);
6628}
6629
6630
6631/**
6632 * Sets a software interrupt (INTn) as pending-for-injection into the VM.
6633 *
6634 * @param pVCpu Pointer to the VMCPU.
6635 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
6636 * out-of-sync. Make sure to update the required fields
6637 * before using them.
6638 * @param uVector The software interrupt vector number.
6639 * @param cbInstr The instruction length in bytes (determines the
6640 * return address pushed on the guest stack).
6641 */
6642DECLINLINE(void) hmR0VmxSetPendingIntN(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint16_t uVector, uint32_t cbInstr)
6643{
6644 uint32_t u32IntrInfo = uVector | VMX_EXIT_INTERRUPTION_INFO_VALID;
6645 if ( uVector == X86_XCPT_BP
6646 || uVector == X86_XCPT_OF)
6647 {
6648 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6649 }
6650 else
6651 u32IntrInfo |= (VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT << VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT);
6652 hmR0VmxSetPendingEvent(pVCpu, u32IntrInfo, cbInstr, 0 /* u32ErrCode */, 0 /* GCPtrFaultAddress */);
6653}
6654
6655
6656/**
6657 * Pushes a 2-byte value onto the real-mode (in virtual-8086 mode) guest's
6658 * stack.
6659 *
6660 * @returns VBox status code (informational status codes included).
6661 * @retval VINF_EM_RESET if pushing a value to the stack caused a triple-fault.
6662 * @param pVM Pointer to the VM.
6663 * @param pMixedCtx Pointer to the guest-CPU context.
6664 * @param uValue The value to push to the guest stack.
6665 */
6666DECLINLINE(int) hmR0VmxRealModeGuestStackPush(PVM pVM, PCPUMCTX pMixedCtx, uint16_t uValue)
6667{
6668 /*
6669 * The stack limit is 0xffff in real-on-virtual 8086 mode. Real-mode with weird stack limits cannot be run in
6670 * virtual 8086 mode in VT-x. See Intel spec. 26.3.1.2 "Checks on Guest Segment Registers".
6671 * See Intel Instruction reference for PUSH and Intel spec. 22.33.1 "Segment Wraparound".
6672 */
6673 if (pMixedCtx->sp == 1)
6674 return VINF_EM_RESET;
6675 pMixedCtx->sp -= sizeof(uint16_t); /* May wrap around, which is expected behaviour. */
6676 int rc = PGMPhysSimpleWriteGCPhys(pVM, pMixedCtx->ss.u64Base + pMixedCtx->sp, &uValue, sizeof(uint16_t));
6677 AssertRCReturn(rc, rc);
6678 return rc;
6679}
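/*
 * Worked example (illustrative, not in the original source) of the wraparound
 * behaviour above: with SS.base = 0 and sp = 0x0000, the push decrements sp to
 * 0xfffe and writes the 2-byte value at guest-physical 0xfffe; only sp == 1
 * triggers the VINF_EM_RESET (triple-fault) path, because the write would
 * cross the 0xffff segment limit.
 */
#if 0 /* Example only. */
    pMixedCtx->sp = 0;                                              /* Wraps to 0xfffe on push. */
    int rc = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, 0xBEEF); /* rc == VINF_SUCCESS */
#endif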
6680
6681
6682/**
6683 * Injects an event into the guest upon VM-entry by updating the relevant fields
6684 * in the VM-entry area in the VMCS.
6685 *
6686 * @returns VBox status code (informational error codes included).
6687 * @retval VINF_SUCCESS if the event is successfully injected into the VMCS.
6688 * @retval VINF_EM_RESET if event injection resulted in a triple-fault.
6689 *
6690 * @param pVCpu Pointer to the VMCPU.
6691 * @param pMixedCtx Pointer to the guest-CPU context. The data may
6692 * be out-of-sync. Make sure to update the required
6693 * fields before using them.
6694 * @param u64IntrInfo The VM-entry interruption-information field.
6695 * @param cbInstr The VM-entry instruction length in bytes (for
6696 * software interrupts, exceptions and privileged
6697 * software exceptions).
6698 * @param u32ErrCode The VM-entry exception error code.
6699 * @param GCPtrFaultAddress The page-fault address for #PF exceptions.
6700 * @param puIntrState Pointer to the current guest interruptibility-state.
6701 * This interruptibility-state will be updated if
6702 * necessary. This cannot be NULL.
6703 *
6704 * @remarks No-long-jump zone!!!
6705 * @remarks Requires CR0!
6706 */
6707static int hmR0VmxInjectEventVmcs(PVMCPU pVCpu, PCPUMCTX pMixedCtx, uint64_t u64IntrInfo, uint32_t cbInstr,
6708 uint32_t u32ErrCode, RTGCUINTREG GCPtrFaultAddress, uint32_t *puIntrState)
6709{
6710 /* Intel spec. 24.8.3 "VM-Entry Controls for Event Injection" specifies the interruption-information field to be 32-bits. */
6711 AssertMsg(u64IntrInfo >> 32 == 0, ("%#RX64\n", u64IntrInfo));
6712 Assert(puIntrState);
6713 uint32_t u32IntrInfo = (uint32_t)u64IntrInfo;
6714
6715 const uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(u32IntrInfo);
6716 const uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo);
6717
6718#ifdef VBOX_STRICT
6719 /* Validate the error-code-valid bit for hardware exceptions. */
6720 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT)
6721 {
6722 switch (uVector)
6723 {
6724 case X86_XCPT_PF:
6725 case X86_XCPT_DF:
6726 case X86_XCPT_TS:
6727 case X86_XCPT_NP:
6728 case X86_XCPT_SS:
6729 case X86_XCPT_GP:
6730 case X86_XCPT_AC:
6731 AssertMsg(VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntrInfo),
6732 ("Error-code-valid bit not set for exception that has an error code uVector=%#x\n", uVector));
6733 /* fallthru */
6734 default:
6735 break;
6736 }
6737 }
6738#endif
6739
6740 /* Cannot inject an NMI when block-by-MOV SS is in effect. */
6741 Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6742 || !(*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS));
6743
6744 STAM_COUNTER_INC(&pVCpu->hm.s.paStatInjectedIrqsR0[uVector & MASK_INJECT_IRQ_STAT]);
6745
6746 /* We require CR0 to check if the guest is in real-mode. */
6747 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
6748 AssertRCReturn(rc, rc);
6749
6750 /*
6751 * Hardware interrupts & exceptions cannot be delivered through the software interrupt redirection bitmap to the real
6752 * mode task in virtual-8086 mode. We must jump to the interrupt handler in the (real-mode) guest.
6753 * See Intel spec. 20.3 "Interrupt and Exception handling in Virtual-8086 Mode" for interrupt & exception classes.
6754 * See Intel spec. 20.1.4 "Interrupt and Exception Handling" for real-mode interrupt handling.
6755 */
6756 if (CPUMIsGuestInRealModeEx(pMixedCtx))
6757 {
6758 PVM pVM = pVCpu->CTX_SUFF(pVM);
6759 if (!pVM->hm.s.vmx.fUnrestrictedGuest)
6760 {
6761 Assert(PDMVmmDevHeapIsEnabled(pVM));
6762 Assert(pVM->hm.s.vmx.pRealModeTSS);
6763
6764 /* We require RIP, RSP, RFLAGS, CS, IDTR. Save the required ones from the VMCS. */
6765 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
6766 rc |= hmR0VmxSaveGuestTableRegs(pVCpu, pMixedCtx);
6767 rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
6768 AssertRCReturn(rc, rc);
6769 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState & HMVMX_UPDATED_GUEST_RIP);
6770
6771 /* Check if the interrupt handler is present in the IVT (real-mode IDT). IDT limit is (4N - 1). */
6772 const size_t cbIdtEntry = 4;
6773 if (uVector * cbIdtEntry + (cbIdtEntry - 1) > pMixedCtx->idtr.cbIdt)
6774 {
6775 /* If we are trying to inject a #DF with no valid IDT entry, return a triple-fault. */
6776 if (uVector == X86_XCPT_DF)
6777 return VINF_EM_RESET;
6778 else if (uVector == X86_XCPT_GP)
6779 {
6780 /* If we're injecting a #GP with no valid IDT entry, inject a double-fault. */
6781 return hmR0VmxInjectXcptDF(pVCpu, pMixedCtx, puIntrState);
6782 }
6783
6784 /* If we're injecting an interrupt/exception with no valid IDT entry, inject a general-protection fault. */
6785 /* No error codes for exceptions in real-mode. See Intel spec. 20.1.4 "Interrupt and Exception Handling" */
6786 return hmR0VmxInjectXcptGP(pVCpu, pMixedCtx, false /* fErrCodeValid */, 0 /* u32ErrCode */, puIntrState);
6787 }
6788
6789 /* Software exceptions (#BP and #OF exceptions thrown as a result of INT3 or INTO) */
6790 uint16_t uGuestIp = pMixedCtx->ip;
6791 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT)
6792 {
6793 Assert(uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
6794 /* #BP and #OF are both benign traps, we need to resume the next instruction. */
6795 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6796 }
6797 else if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT)
6798 uGuestIp = pMixedCtx->ip + (uint16_t)cbInstr;
6799
6800 /* Get the code segment selector and offset from the IDT entry for the interrupt handler. */
6801 uint16_t offIdtEntry = 0;
6802 RTSEL selIdtEntry = 0;
6803 RTGCPHYS GCPhysIdtEntry = (RTGCPHYS)pMixedCtx->idtr.pIdt + uVector * cbIdtEntry;
6804 rc = PGMPhysSimpleReadGCPhys(pVM, &offIdtEntry, GCPhysIdtEntry, sizeof(offIdtEntry));
6805 rc |= PGMPhysSimpleReadGCPhys(pVM, &selIdtEntry, GCPhysIdtEntry + 2, sizeof(selIdtEntry));
6806 AssertRCReturn(rc, rc);
6807
6808 /* Construct the stack frame for the interrupt/exception handler. */
6809 rc = hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->eflags.u32);
6810 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, pMixedCtx->cs.Sel);
6811 rc |= hmR0VmxRealModeGuestStackPush(pVM, pMixedCtx, uGuestIp);
6812 AssertRCReturn(rc, rc);
6813
6814 /* Clear the required eflag bits and jump to the interrupt/exception handler. */
6815 if (rc == VINF_SUCCESS)
6816 {
6817 pMixedCtx->eflags.u32 &= ~(X86_EFL_IF | X86_EFL_TF | X86_EFL_RF | X86_EFL_AC);
6818 pMixedCtx->rip = offIdtEntry;
6819 pMixedCtx->cs.Sel = selIdtEntry;
6820 pMixedCtx->cs.u64Base = selIdtEntry << 4; /* Real-mode segment base = selector * 16. */
6821 if ( uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
6822 && uVector == X86_XCPT_PF)
6823 {
6824 pMixedCtx->cr2 = GCPtrFaultAddress;
6825 }
6826
6827 /* If any other guest-state bits are changed here, make sure to update
6828 hmR0VmxPreRunGuestCommitted() when thread-context hooks are used. */
6829 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS
6830 | HM_CHANGED_GUEST_RIP
6831 | HM_CHANGED_GUEST_RFLAGS
6832 | HM_CHANGED_GUEST_RSP;
6833
6834 /* We're clearing interrupts, which means no block-by-STI interrupt-inhibition. */
6835 if (*puIntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
6836 {
6837 Assert( uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI
6838 && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
6839 Log4(("Clearing inhibition due to STI.\n"));
6840 *puIntrState &= ~VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI;
6841 }
6842 Log4(("Injecting real-mode: u32IntrInfo=%#x u32ErrCode=%#x instrlen=%#x\n", u32IntrInfo, u32ErrCode, cbInstr));
6843 }
6844 Assert(rc == VINF_SUCCESS || rc == VINF_EM_RESET);
6845 return rc;
6846 }
6847 else
6848 {
6849 /*
6850 * With unrestricted guest execution enabled, real-mode guests must not have the deliver-error-code bit set.
6851 * See Intel spec. 26.2.1.3 "VM-Entry Control Fields".
6852 */
6853 u32IntrInfo &= ~VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID;
6854 }
6855 }
6856
6857 /* Validate. */
6858 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(u32IntrInfo)); /* Bit 31 (Valid bit) must be set by caller. */
6859 Assert(!VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK(u32IntrInfo)); /* Bit 12 MBZ. */
6860 Assert(!(u32IntrInfo & 0x7ffff000)); /* Bits 30:12 MBZ. */
6861
6862 /* Inject. */
6863 rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, u32IntrInfo);
6864 if (VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(u32IntrInfo))
6865 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE, u32ErrCode);
6866 rc |= VMXWriteVmcs32(VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH, cbInstr);
6867
6868 if ( VMX_EXIT_INTERRUPTION_INFO_TYPE(u32IntrInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
6869 && uVector == X86_XCPT_PF)
6870 {
6871 pMixedCtx->cr2 = GCPtrFaultAddress;
6872 }
6873
6874 Log4(("Injecting vcpu[%RU32] u32IntrInfo=%#x u32ErrCode=%#x cbInstr=%#x pMixedCtx->uCR2=%#RX64\n", pVCpu->idCpu,
6875 u32IntrInfo, u32ErrCode, cbInstr, pMixedCtx->cr2));
6876
6877 AssertRCReturn(rc, rc);
6878 return rc;
6879}
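/*
 * Illustrative layout (not part of the original source) of the 4-byte
 * real-mode IVT entry read by the real-on-v86 path above: a 16-bit handler
 * offset (IP) followed by a 16-bit code segment selector (CS), matching the
 * two PGMPhysSimpleReadGCPhys() calls at offsets 0 and 2.
 */
#if 0 /* Example only. */
typedef struct EXAMPLEIVTENTRY
{
    uint16_t offHandler;    /* Bytes 0-1: IP of the interrupt handler. */
    uint16_t selHandler;    /* Bytes 2-3: CS of the interrupt handler. */
} EXAMPLEIVTENTRY;
#endif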
6880
6881
6882/**
6883 * Enters the VT-x session.
6884 *
6885 * @returns VBox status code.
6886 * @param pVM Pointer to the VM.
6887 * @param pVCpu Pointer to the VMCPU.
6888 * @param pCpu Pointer to the CPU info struct.
6889 */
6890VMMR0DECL(int) VMXR0Enter(PVM pVM, PVMCPU pVCpu, PHMGLOBALCPUINFO pCpu)
6891{
6892 AssertPtr(pVM);
6893 AssertPtr(pVCpu);
6894 Assert(pVM->hm.s.vmx.fSupported);
6895 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6896 NOREF(pCpu);
6897
6898 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
6899
6900#ifdef VBOX_STRICT
6901 /* Make sure we're in VMX root mode. */
6902 RTCCUINTREG u32HostCR4 = ASMGetCR4();
6903 if (!(u32HostCR4 & X86_CR4_VMXE))
6904 {
6905 LogRel(("VMXR0Enter: X86_CR4_VMXE bit in CR4 is not set!\n"));
6906 return VERR_VMX_X86_CR4_VMXE_CLEARED;
6907 }
6908#endif
6909
6910 /*
6911 * Load the VCPU's VMCS as the current (and active) one.
6912 */
6913 Assert(pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR);
6914 int rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
6915 if (RT_FAILURE(rc))
6916 return rc;
6917
6918 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
6919 pVCpu->hm.s.fLeaveDone = false;
6920 Log4Func(("Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
6921
6922 return VINF_SUCCESS;
6923}
6924
6925
6926/**
6927 * The thread-context callback (only on platforms which support it).
6928 *
6929 * @param enmEvent The thread-context event.
6930 * @param pVCpu Pointer to the VMCPU.
6931 * @param fGlobalInit Whether global VT-x/AMD-V init. was used.
6932 * @thread EMT.
6933 */
6934VMMR0DECL(void) VMXR0ThreadCtxCallback(RTTHREADCTXEVENT enmEvent, PVMCPU pVCpu, bool fGlobalInit)
6935{
6936 switch (enmEvent)
6937 {
6938 case RTTHREADCTXEVENT_PREEMPTING:
6939 {
6940 /** @todo Stats. */
6941 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6942 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
6943 VMCPU_ASSERT_EMT(pVCpu);
6944
6945 PVM pVM = pVCpu->CTX_SUFF(pVM);
6946 PCPUMCTX pMixedCtx = CPUMQueryGuestCtxPtr(pVCpu);
6947
6948 /* No longjmps (logger flushes, locks) in this fragile context. */
6949 VMMRZCallRing3Disable(pVCpu);
6950 Log4Func(("Preempting: HostCpuId=%u\n", RTMpCpuId()));
6951
6952 /* Save the guest-state, restore host-state (FPU, debug etc.). */
6953 if (!pVCpu->hm.s.fLeaveDone)
6954 {
6955 hmR0VmxLeave(pVM, pVCpu, pMixedCtx);
6956 pVCpu->hm.s.fLeaveDone = true;
6957 }
6958
6959 /* Leave HM context, takes care of local init (term). */
6960 int rc = HMR0LeaveCpu(pVCpu);
6961 AssertRC(rc); NOREF(rc);
6962
6963 /* Restore longjmp state. */
6964 VMMRZCallRing3Enable(pVCpu);
6965 break;
6966 }
6967
6968 case RTTHREADCTXEVENT_RESUMED:
6969 {
6970 /** @todo Stats. */
6971 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
6972 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
6973 VMCPU_ASSERT_EMT(pVCpu);
6974
6975 /* No longjmps here, as we don't want to trigger preemption (& its hook) while resuming. */
6976 VMMRZCallRing3Disable(pVCpu);
6977 Log4Func(("Resumed: HostCpuId=%u\n", RTMpCpuId()));
6978
6979 /* Initialize the bare minimum state required for HM. This takes care of
6980 initializing VT-x if necessary (onlined CPUs, local init etc.) */
6981 int rc = HMR0EnterCpu(pVCpu);
6982 AssertRC(rc);
6983 Assert(pVCpu->hm.s.fContextUseFlags & (HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE));
6984
6985 /* Load the active VMCS as the current one. */
6986 if (pVCpu->hm.s.vmx.uVmcsState & HMVMX_VMCS_STATE_CLEAR)
6987 {
6988 rc = VMXActivateVmcs(pVCpu->hm.s.vmx.HCPhysVmcs);
6989 AssertRC(rc); NOREF(rc);
6990 pVCpu->hm.s.vmx.uVmcsState = HMVMX_VMCS_STATE_ACTIVE;
6991 Log4Func(("Resumed: Activated Vmcs. HostCpuId=%u\n", RTMpCpuId()));
6992 }
6993 pVCpu->hm.s.fLeaveDone = false;
6994 VMMRZCallRing3Enable(pVCpu);
6995 break;
6996 }
6997
6998 default:
6999 break;
7000 }
7001}
7002
7003
7004/**
7005 * Saves the host state in the VMCS host-state.
7006 * Sets up the VM-exit MSR-load area.
7007 *
7008 * The CPU state will be loaded from these fields on every successful VM-exit.
7009 *
7010 * @returns VBox status code.
7011 * @param pVM Pointer to the VM.
7012 * @param pVCpu Pointer to the VMCPU.
7013 *
7014 * @remarks No-long-jump zone!!!
7015 */
7016static int hmR0VmxSaveHostState(PVM pVM, PVMCPU pVCpu)
7017{
7018 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7019
7020 if (!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT))
7021 return VINF_SUCCESS;
7022
7023 int rc = hmR0VmxSaveHostControlRegs(pVM, pVCpu);
7024 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostControlRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7025
7026 rc = hmR0VmxSaveHostSegmentRegs(pVM, pVCpu);
7027 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostSegmentRegisters failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7028
7029 rc = hmR0VmxSaveHostMsrs(pVM, pVCpu);
7030 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSaveHostMsrs failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7031
7032 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_HOST_CONTEXT;
7033 return rc;
7034}
7035
7036
7037/**
7038 * Saves the host state in the VMCS host-state.
7039 *
7040 * @returns VBox status code.
7041 * @param pVM Pointer to the VM.
7042 * @param pVCpu Pointer to the VMCPU.
7043 *
7044 * @remarks No-long-jump zone!!!
7045 */
7046VMMR0DECL(int) VMXR0SaveHostState(PVM pVM, PVMCPU pVCpu)
7047{
7048 AssertPtr(pVM);
7049 AssertPtr(pVCpu);
7050
7051 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
7052
7053 /* When thread-context hooks are available, this is done later (when preemption/interrupts are disabled). */
7054 if (!VMMR0ThreadCtxHooksAreRegistered(pVCpu))
7055 {
7056 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7057 return hmR0VmxSaveHostState(pVM, pVCpu);
7058 }
7059 return VINF_SUCCESS;
7060}
7061
7062
7063/**
7064 * Loads the guest state into the VMCS guest-state area. The CPU state will be
7065 * loaded from these fields on every successful VM-entry.
7066 *
7067 * Sets up the VM-entry MSR-load and VM-exit MSR-store areas.
7068 * Sets up the VM-entry controls.
7069 * Sets up the appropriate VMX non-root function to execute guest code based on
7070 * the guest CPU mode.
7071 *
7072 * @returns VBox status code.
7073 * @param pVM Pointer to the VM.
7074 * @param pVCpu Pointer to the VMCPU.
7075 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7076 * out-of-sync. Make sure to update the required fields
7077 * before using them.
7078 *
7079 * @remarks No-long-jump zone!!!
7080 */
7081static int hmR0VmxLoadGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7082{
7083 AssertPtr(pVM);
7084 AssertPtr(pVCpu);
7085 AssertPtr(pMixedCtx);
7086 HMVMX_ASSERT_PREEMPT_SAFE();
7087
7088#ifdef LOG_ENABLED
7089 /** @todo r=ramshankar: I'm not able to use VMMRZCallRing3Disable() here,
7090 * probably not initialized yet? Anyway this will do for now.
7091 *
7092 * Update: Should be possible once VMXR0LoadGuestState() is removed as an
7093 * interface and disable ring-3 calls when thread-context hooks are not
7094 * available. */
7095 bool fCallerDisabledLogFlush = VMMR0IsLogFlushDisabled(pVCpu);
7096 VMMR0LogFlushDisable(pVCpu);
7097#endif
7098
7099 LogFlowFunc(("pVM=%p pVCpu=%p\n", pVM, pVCpu));
7100
7101 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatLoadGuestState, x);
7102
7103 /* Determine real-on-v86 mode. */
7104 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = false;
7105 if ( !pVM->hm.s.vmx.fUnrestrictedGuest
7106 && CPUMIsGuestInRealModeEx(pMixedCtx))
7107 {
7108 pVCpu->hm.s.vmx.RealMode.fRealOnV86Active = true;
7109 }
7110
7111 /*
7112 * Load the guest-state into the VMCS.
7113 * Any ordering dependency among the sub-functions below must be explicitly stated using comments.
7114 * Ideally, assert that the cross-dependent bits are up to date at the point of using it.
7115 */
7116 int rc = hmR0VmxLoadGuestEntryCtls(pVCpu, pMixedCtx);
7117 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestEntryCtls! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7118
7119 rc = hmR0VmxLoadGuestExitCtls(pVCpu, pMixedCtx);
7120 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupExitCtls failed! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7121
7122 rc = hmR0VmxLoadGuestActivityState(pVCpu, pMixedCtx);
7123 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestActivityState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7124
7125 rc = hmR0VmxLoadGuestCR3AndCR4(pVCpu, pMixedCtx);
7126 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestCR3AndCR4: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7127
7128 /* Assumes CR0 is up-to-date (strict builds require CR0 for segment register validation checks). */
7129 rc = hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
7130 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestSegmentRegs: rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7131
7132 rc = hmR0VmxLoadGuestMsrs(pVCpu, pMixedCtx);
7133 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestMsrs! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7134
7135 rc = hmR0VmxLoadGuestApicState(pVCpu, pMixedCtx);
7136 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestApicState! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7137
7138 /*
7139 * Loading Rflags here is fine, even though Rflags.TF might depend on guest debug state (which is not loaded here).
7140 * It is re-evaluated and updated if necessary in hmR0VmxLoadSharedState().
7141 */
7142 rc = hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
7143 AssertLogRelMsgRCReturn(rc, ("hmR0VmxLoadGuestRipRspRflags! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7144
7145 rc = hmR0VmxSetupVMRunHandler(pVCpu, pMixedCtx);
7146 AssertLogRelMsgRCReturn(rc, ("hmR0VmxSetupVMRunHandler! rc=%Rrc (pVM=%p pVCpu=%p)\n", rc, pVM, pVCpu), rc);
7147
7148 /* Clear any unused and reserved bits. */
7149 pVCpu->hm.s.fContextUseFlags &= ~HM_CHANGED_GUEST_CR2;
7150
7151#ifdef LOG_ENABLED
7152 /* Only reenable log-flushing if the caller has it enabled. */
7153 if (!fCallerDisabledLogFlush)
7154 VMMR0LogFlushEnable(pVCpu);
7155#endif
7156
7157 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatLoadGuestState, x);
7158 return rc;
7159}
7160
7161
7162/**
7163 * Loads the state shared between the host and guest into the VMCS.
7164 *
7165 * @param pVM Pointer to the VM.
7166 * @param pVCpu Pointer to the VMCPU.
7167 * @param pCtx Pointer to the guest-CPU context.
7168 *
7169 * @remarks No-long-jump zone!!!
7170 */
7171static void hmR0VmxLoadSharedState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
7172{
7173 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7174 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7175
7176 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_CR0)
7177 {
7178 int rc = hmR0VmxLoadSharedCR0(pVCpu, pCtx);
7179 AssertRC(rc);
7180 }
7181
7182 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_DEBUG)
7183 {
7184 int rc = hmR0VmxLoadSharedDebugState(pVCpu, pCtx);
7185 AssertRC(rc);
7186
7187 /* Loading shared debug bits might have changed eflags.TF bit for debugging purposes. */
7188 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_GUEST_RFLAGS)
7189 {
7190 rc = hmR0VmxLoadGuestRflags(pVCpu, pCtx);
7191 AssertRC(rc);
7192 }
7193 }
7194
7195 AssertMsg(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE), ("fContextUseFlags=%#x\n",
7196 pVCpu->hm.s.fContextUseFlags));
7197}
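/* Illustrative only: code elsewhere that dirties the guest debug registers would tag them
   for reloading here with something like
       pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
   after which the next hmR0VmxPreRunGuestCommitted() call funnels into this function. */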
7198
7199
7200/**
7201 * Worker for loading the guest-state bits in the inner VT-x execution loop.
7202 *
7203 * @param pVM Pointer to the VM.
7204 * @param pVCpu Pointer to the VMCPU.
7205 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7206 * out-of-sync. Make sure to update the required fields
7207 * before using them.
7208 */
7209DECLINLINE(void) hmR0VmxLoadGuestStateOptimal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx)
7210{
7211 HMVMX_ASSERT_PREEMPT_SAFE();
7212
7213 Log5(("LoadFlags=%#RX32\n", pVCpu->hm.s.fContextUseFlags));
7214#ifdef HMVMX_SYNC_FULL_GUEST_STATE
7215 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
7216#endif
7217
7218 if (pVCpu->hm.s.fContextUseFlags == HM_CHANGED_GUEST_RIP)
7219 {
7220 int rc = hmR0VmxLoadGuestRip(pVCpu, pMixedCtx);
7221 AssertRC(rc);
7222 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadMinimal);
7223 }
7224 else if (pVCpu->hm.s.fContextUseFlags)
7225 {
7226 int rc = hmR0VmxLoadGuestState(pVM, pVCpu, pMixedCtx);
7227 AssertRC(rc);
7228 STAM_COUNTER_INC(&pVCpu->hm.s.StatLoadFull);
7229 }
7230
7231 /* All the guest state bits should be loaded except maybe the host context and shared host/guest bits. */
7232 AssertMsg( !(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_ALL_GUEST)
7233 || !(pVCpu->hm.s.fContextUseFlags & ~(HM_CHANGED_HOST_CONTEXT | HM_CHANGED_HOST_GUEST_SHARED_STATE)),
7234 ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
7235
7236#ifdef HMVMX_ALWAYS_CHECK_GUEST_STATE
7237 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVM, pVCpu, pMixedCtx);
7238 if (uInvalidReason != VMX_IGS_REASON_NOT_FOUND)
7239 Log4(("hmR0VmxCheckGuestState returned %#x\n", uInvalidReason));
7240#endif
7241}
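/* The minimal path above matters because instruction emulation typically dirties only RIP;
   e.g. an exit handler that finishes with hmR0VmxAdvanceGuestRip() sets just
   HM_CHANGED_GUEST_RIP, so the next entry rewrites a single VMCS field instead of
   reloading the full guest state. */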
7242
7243
7244/**
7245 * Does the preparations before executing guest code in VT-x.
7246 *
7247 * This may cause longjmps to ring-3 and may even result in rescheduling to the
7248 * recompiler. We must be cautious what we do here regarding committing
7249 * guest-state information into the VMCS assuming we assuredly execute the
7250 * guest in VT-x. If we fall back to the recompiler after updating the VMCS and
7251 * clearing the common-state (TRPM/forceflags), we must undo those changes so
7252 * that the recompiler can (and should) use them when it resumes guest
7253 * execution. Otherwise such operations must be done when we can no longer
7254 * exit to ring-3.
7255 *
7256 * @returns VBox status code (informational status codes included).
7257 * @retval VINF_SUCCESS if we can proceed with running the guest.
7258 * @retval VINF_EM_RESET if a triple-fault occurs while injecting a double-fault
7259 * into the guest.
7260 * @retval VINF_* scheduling changes, we have to go back to ring-3.
7261 *
7262 * @param pVM Pointer to the VM.
7263 * @param pVCpu Pointer to the VMCPU.
7264 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7265 * out-of-sync. Make sure to update the required fields
7266 * before using them.
7267 * @param pVmxTransient Pointer to the VMX transient structure.
7268 *
7269 * @remarks Called with preemption disabled.
7270 */
7271static int hmR0VmxPreRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7272{
7273 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7274
7275#ifdef VBOX_WITH_2X_4GB_ADDR_SPACE_IN_R0
7276 PGMRZDynMapFlushAutoSet(pVCpu);
7277#endif
7278
7279 /* Check force flag actions that might require us to go back to ring-3. */
7280 int rc = hmR0VmxCheckForceFlags(pVM, pVCpu, pMixedCtx);
7281 if (rc != VINF_SUCCESS)
7282 return rc;
7283
7284#ifndef IEM_VERIFICATION_MODE_FULL
7285 /* Set up the virtualized APIC accesses. pMixedCtx->msrApicBase is always up-to-date. It's not part of the VMCS. */
7286 if ( pVCpu->hm.s.vmx.u64MsrApicBase != pMixedCtx->msrApicBase
7287 && (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC))
7288 {
7289 Assert(pVM->hm.s.vmx.HCPhysApicAccess);
7290 RTGCPHYS GCPhysApicBase;
7291 GCPhysApicBase = pMixedCtx->msrApicBase;
7292 GCPhysApicBase &= PAGE_BASE_GC_MASK;
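 /* E.g. a typical IA32_APIC_BASE value of 0xfee00900 (APIC-enable + BSP flags set)
    masks down to the page base 0xfee00000; the low 12 bits are flags, not address. */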
7293
7294 /* Unalias any existing mapping. */
7295 rc = PGMHandlerPhysicalReset(pVM, GCPhysApicBase);
7296 AssertRCReturn(rc, rc);
7297
7298 /* Map the HC APIC-access page into the GC space, this also updates the shadow page tables if necessary. */
7299 Log4(("Mapping HC APIC-access page at GCPhysApicBase=%#RGp\n", GCPhysApicBase));
7300 rc = IOMMMIOMapMMIOHCPage(pVM, pVCpu, GCPhysApicBase, pVM->hm.s.vmx.HCPhysApicAccess, X86_PTE_RW | X86_PTE_P);
7301 AssertRCReturn(rc, rc);
7302
7303 pVCpu->hm.s.vmx.u64MsrApicBase = pMixedCtx->msrApicBase;
7304 }
7305#endif /* !IEM_VERIFICATION_MODE_FULL */
7306
7307 /* Load the guest state bits, we can handle longjmps/getting preempted here. */
7308 hmR0VmxLoadGuestStateOptimal(pVM, pVCpu, pMixedCtx);
7309
7310 /*
7311 * Evaluate events as pending-for-injection into the guest. Toggling of force-flags here is safe as long as
7312 * we update TRPM on premature exits to ring-3 before executing guest code. We must NOT restore the force-flags.
7313 */
7314 if (TRPMHasTrap(pVCpu))
7315 hmR0VmxTrpmTrapToPendingEvent(pVCpu);
7316 else if (!pVCpu->hm.s.Event.fPending)
7317 hmR0VmxEvaluatePendingEvent(pVCpu, pMixedCtx);
7318
7319 /*
7320 * We disable interrupts so that we don't miss any interrupts that would flag preemption (IPI/timers etc.)
7321 * when thread-context hooks aren't used and we've been running with preemption disabled for a while.
7322 *
7323 * We need to check for force-flags that could've possibly been altered since we last checked them (e.g.
7324 * by PDMGetInterrupt() leaving the PDM critical section, see @bugref{6398}).
7325 *
7326 * We also check a couple of other force-flags as a last opportunity to get the EMT back to ring-3 before
7327 * executing guest code.
7328 */
7329 pVmxTransient->uEflags = ASMIntDisableFlags();
7330 if ( VM_FF_IS_PENDING(pVM, VM_FF_EMT_RENDEZVOUS | VM_FF_TM_VIRTUAL_SYNC)
7331 || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HM_TO_R3_MASK))
7332 {
7333 ASMSetFlags(pVmxTransient->uEflags);
7334 STAM_COUNTER_INC(&pVCpu->hm.s.StatSwitchHmToR3FF);
7335 return VINF_EM_RAW_TO_R3;
7336 }
7337 else if (RTThreadPreemptIsPending(NIL_RTTHREAD))
7338 {
7339 ASMSetFlags(pVmxTransient->uEflags);
7340 STAM_COUNTER_INC(&pVCpu->hm.s.StatPendingHostIrq);
7341 return VINF_EM_RAW_INTERRUPT;
7342 }
7343
7344 /*
7345 * No more longjmps or returns to ring-3 (that can continue guest execution) from this point!!!
7346 *
7347 * Event injection might result in triple-faulting the VM (real-on-v86 case), which is why it's
7348 * done here and not in hmR0VmxPreRunGuestCommitted() which doesn't expect failures.
7349 */
7350 rc = hmR0VmxInjectPendingEvent(pVCpu, pMixedCtx);
7351 if (RT_UNLIKELY(rc != VINF_SUCCESS))
7352 ASMSetFlags(pVmxTransient->uEflags);
7353 return rc;
7354}
7355
7356
7357/**
7358 * Prepares to run guest code in VT-x once we've committed to doing so. This
7359 * means there is no backing out to ring-3 or anywhere else from this
7360 * point on.
7361 *
7362 * @param pVM Pointer to the VM.
7363 * @param pVCpu Pointer to the VMCPU.
7364 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7365 * out-of-sync. Make sure to update the required fields
7366 * before using them.
7367 * @param pVmxTransient Pointer to the VMX transient structure.
7368 *
7369 * @remarks Called with preemption disabled.
7370 * @remarks No-long-jump zone!!!
7371 */
7372static void hmR0VmxPreRunGuestCommitted(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7373{
7374 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7375 Assert(VMMR0IsLogFlushDisabled(pVCpu));
7376 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
7377
7378 VMCPU_ASSERT_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
7379 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_EXEC); /* Indicate the start of guest execution. */
7380
7381 /*
7382 * Load the host state bits as we may have been preempted (only happens when
7383 * thread-context hooks are used).
7384 */
7385 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT)
7386 {
7387 Assert(VMMR0ThreadCtxHooksAreRegistered(pVCpu));
7388 int rc = hmR0VmxSaveHostState(pVM, pVCpu);
7389 AssertRC(rc);
7390 }
7391 Assert(!(pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_CONTEXT));
7392
7393 /*
7394 * If we are injecting events to a real-on-v86 mode guest, we may have to update
7395 * RIP and some other registers, i.e. hmR0VmxInjectPendingEvent()->hmR0VmxInjectEventVmcs().
7396 * Reload only the necessary state, the assertion will catch if other parts of the code
7397 * change.
7398 */
7399 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
7400 {
7401 hmR0VmxLoadGuestRipRspRflags(pVCpu, pMixedCtx);
7402 hmR0VmxLoadGuestSegmentRegs(pVCpu, pMixedCtx);
7403 }
7404
7405 /* Load the state shared between host and guest (FPU, debug). */
7406 if (pVCpu->hm.s.fContextUseFlags & HM_CHANGED_HOST_GUEST_SHARED_STATE)
7407 hmR0VmxLoadSharedState(pVM, pVCpu, pMixedCtx);
7408 AssertMsg(!pVCpu->hm.s.fContextUseFlags, ("fContextUseFlags=%#x\n", pVCpu->hm.s.fContextUseFlags));
7409
7410 /*
7411 * Cache the TPR-shadow for checking on every VM-exit if it might have changed.
7412 */
7413 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
7414 pVmxTransient->u8GuestTpr = pVCpu->hm.s.vmx.pbVirtApic[0x80];
7415
7416 if ( pVmxTransient->fUpdateTscOffsettingAndPreemptTimer
7417 || HMR0GetCurrentCpu()->idCpu != pVCpu->hm.s.idLastCpu)
7418 {
7419 hmR0VmxUpdateTscOffsettingAndPreemptTimer(pVCpu, pMixedCtx);
7420 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = false;
7421 }
7422
7423 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, true); /* Used for TLB-shootdowns, set this across the world switch. */
7424 hmR0VmxFlushTaggedTlb(pVCpu); /* Invalidate the appropriate guest entries from the TLB. */
7425
7426 RTCPUID idCurrentCpu = HMR0GetCurrentCpu()->idCpu;
7427 Assert(idCurrentCpu == pVCpu->hm.s.idLastCpu);
7428 pVCpu->hm.s.vmx.LastError.idCurrentCpu = idCurrentCpu; /* Update the error reporting info. with the current host CPU. */
7429
7430 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatEntry, &pVCpu->hm.s.StatInGC, x);
7431
7432 TMNotifyStartOfExecution(pVCpu); /* Finally, notify TM to resume its clocks as we're about
7433 to start executing. */
7434
7435#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
7436 /*
7437 * Save the current host TSC_AUX and write the guest TSC_AUX to the host, so that
7438 * RDTSCP (when it doesn't cause a VM-exit) reads the guest MSR. See @bugref{3324}.
7439 */
7440 if ( (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
7441 && !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
7442 {
7443 pVCpu->hm.s.u64HostTscAux = ASMRdMsr(MSR_K8_TSC_AUX);
7444 uint64_t u64GuestTscAux = 0;
7445 int rc2 = CPUMQueryGuestMsr(pVCpu, MSR_K8_TSC_AUX, &u64GuestTscAux);
7446 AssertRC(rc2);
7447 ASMWrMsr(MSR_K8_TSC_AUX, u64GuestTscAux);
7448 }
7449#endif
7450}
7451
7452
7453/**
7454 * Performs some essential restoration of state after running guest code in
7455 * VT-x.
7456 *
7457 * @param pVM Pointer to the VM.
7458 * @param pVCpu Pointer to the VMCPU.
7459 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7460 * out-of-sync. Make sure to update the required fields
7461 * before using them.
7462 * @param pVmxTransient Pointer to the VMX transient structure.
7463 * @param rcVMRun Return code of VMLAUNCH/VMRESUME.
7464 *
7465 * @remarks Called with interrupts disabled.
7466 * @remarks No-long-jump zone!!! This function will however re-enable longjmps
7467 * unconditionally when it is safe to do so.
7468 */
7469static void hmR0VmxPostRunGuest(PVM pVM, PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, int rcVMRun)
7470{
7471 Assert(!VMMRZCallRing3IsEnabled(pVCpu));
7472
7473 ASMAtomicWriteBool(&pVCpu->hm.s.fCheckedTLBFlush, false); /* See HMInvalidatePageOnAllVCpus(): used for TLB-shootdowns. */
7474 ASMAtomicIncU32(&pVCpu->hm.s.cWorldSwitchExits); /* Initialized in vmR3CreateUVM(): used for TLB-shootdowns. */
7475 pVCpu->hm.s.vmx.fUpdatedGuestState = 0; /* Exits/longjmps to ring-3 requires saving the guest state. */
7476 pVmxTransient->fVmcsFieldsRead = 0; /* Transient fields need to be read from the VMCS. */
7477 pVmxTransient->fVectoringPF = false; /* Vectoring page-fault needs to be determined later. */
7478
7479 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT))
7480 {
7481#ifndef VBOX_WITH_AUTO_MSR_LOAD_RESTORE
7482 /* Restore host's TSC_AUX. */
7483 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP)
7484 ASMWrMsr(MSR_K8_TSC_AUX, pVCpu->hm.s.u64HostTscAux);
7485#endif
7486 /** @todo Find a way to fix hardcoding a guestimate. */
7487 TMCpuTickSetLastSeen(pVCpu, ASMReadTSC()
7488 + pVCpu->hm.s.vmx.u64TSCOffset - 0x400 /* guestimate of world switch overhead in clock ticks */);
7489 }
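 /* While offsetting is active a guest RDTSC returns host-TSC + u64TSCOffset, so the last
    value the guest could have seen is reconstructed from the current host TSC; the 0x400
    ticks subtracted above are a rough allowance for the world-switch overhead. */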
7490
7491 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatInGC, &pVCpu->hm.s.StatExit1, x);
7492 TMNotifyEndOfExecution(pVCpu); /* Notify TM that the guest is no longer running. */
7493 Assert(!(ASMGetFlags() & X86_EFL_IF));
7494 VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HM);
7495
7496 pVCpu->hm.s.vmx.fRestoreHostFlags |= VMX_RESTORE_HOST_REQUIRED; /* Host state messed up by VT-x, we must restore. */
7497 pVCpu->hm.s.vmx.uVmcsState |= HMVMX_VMCS_STATE_LAUNCHED; /* Use VMRESUME instead of VMLAUNCH in the next run. */
7498 ASMSetFlags(pVmxTransient->uEflags); /* Enable interrupts. */
7499 VMMRZCallRing3Enable(pVCpu); /* It is now safe to do longjmps to ring-3!!! */
7500
7501 /* Save the basic VM-exit reason. Refer Intel spec. 24.9.1 "Basic VM-exit Information". */
7502 uint32_t uExitReason;
7503 int rc = VMXReadVmcs32(VMX_VMCS32_RO_EXIT_REASON, &uExitReason);
7504 rc |= hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
7505 AssertRC(rc);
7506 pVmxTransient->uExitReason = (uint16_t)VMX_EXIT_REASON_BASIC(uExitReason);
7507 pVmxTransient->fVMEntryFailed = !!VMX_ENTRY_INTERRUPTION_INFO_VALID(pVmxTransient->uEntryIntrInfo);
7508
7509 /* If the VMLAUNCH/VMRESUME failed, we can bail out early. This does -not- cover VMX_EXIT_ERR_*. */
7510 if (RT_UNLIKELY(rcVMRun != VINF_SUCCESS))
7511 {
7512 Log4(("VM-entry failure: pVCpu=%p idCpu=%RU32 rcVMRun=%Rrc fVMEntryFailed=%RTbool\n", pVCpu, pVCpu->idCpu, rcVMRun,
7513 pVmxTransient->fVMEntryFailed));
7514 return;
7515 }
7516
7517 if (RT_LIKELY(!pVmxTransient->fVMEntryFailed))
7518 {
7519 /* Update the guest interruptibility-state from the VMCS. */
7520 hmR0VmxSaveGuestIntrState(pVCpu, pMixedCtx);
7521#if defined(HMVMX_SYNC_FULL_GUEST_STATE) || defined(HMVMX_SAVE_FULL_GUEST_STATE)
7522 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
7523 AssertRC(rc);
7524#endif
7525 /*
7526 * If the TPR was raised by the guest, it wouldn't cause a VM-exit immediately. Instead we sync the TPR lazily whenever
7527 * we eventually get a VM-exit for any reason. This may be expensive as PDMApicSetTPR() can longjmp to ring-3; it's
7528 * done here because it's easier and no less efficient than making hmR0VmxSaveGuestState()
7529 * cope with longjmps safely (see VMCPU_FF_HM_UPDATE_CR3 handling).
7530 */
7531 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
7532 && pVmxTransient->u8GuestTpr != pVCpu->hm.s.vmx.pbVirtApic[0x80])
7533 {
7534 rc = PDMApicSetTPR(pVCpu, pVCpu->hm.s.vmx.pbVirtApic[0x80]);
7535 AssertRC(rc);
7536 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
7537 }
7538 }
7539}
7540
7541
7542
7543/**
7544 * Runs the guest code using VT-x the normal way.
7545 *
7546 * @returns VBox status code.
7547 * @param pVM Pointer to the VM.
7548 * @param pVCpu Pointer to the VMCPU.
7549 * @param pCtx Pointer to the guest-CPU context.
7550 *
7551 * @note Mostly the same as hmR0VmxRunGuestCodeStep.
7552 * @remarks Called with preemption disabled.
7553 */
7554static int hmR0VmxRunGuestCodeNormal(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
7555{
7556 VMXTRANSIENT VmxTransient;
7557 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
7558 int rc = VERR_INTERNAL_ERROR_5;
7559 uint32_t cLoops = 0;
7560
7561 for (;; cLoops++)
7562 {
7563 Assert(!HMR0SuspendPending());
7564 HMVMX_ASSERT_CPU_SAFE();
7565
7566 /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
7567 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
7568 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
7569 if (rc != VINF_SUCCESS)
7570 break;
7571
7572 /*
7573 * No longjmps to ring-3 from this point on!!!
7574 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional and better than a kernel panic.
7575 * This also disables flushing of the R0-logger instance (if any).
7576 */
7577 VMMRZCallRing3Disable(pVCpu);
7578 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
7579
7580 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
7581 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
7582
7583 /*
7584 * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
7585 * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
7586 */
7587 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
7588 if (RT_UNLIKELY(rc != VINF_SUCCESS)) /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
7589 {
7590 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
7591 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
7592 return rc;
7593 }
7594
7595 /* Handle the VM-exit. */
7596 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
7597 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
7598 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
7599 HMVMX_START_EXIT_DISPATCH_PROF();
7600#ifdef HMVMX_USE_FUNCTION_TABLE
7601 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
7602#else
7603 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
7604#endif
7605 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
7606 if (rc != VINF_SUCCESS)
7607 break;
7608 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
7609 {
7610 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
7611 rc = VINF_EM_RAW_INTERRUPT;
7612 break;
7613 }
7614 }
7615
7616 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
7617 return rc;
7618}
7619
7620
7621/**
7622 * Single steps guest code using VT-x.
7623 *
7624 * @returns VBox status code.
7625 * @param pVM Pointer to the VM.
7626 * @param pVCpu Pointer to the VMCPU.
7627 * @param pCtx Pointer to the guest-CPU context.
7628 *
7629 * @note Mostly the same as hmR0VmxRunGuestCodeNormal.
7630 * @remarks Called with preemption disabled.
7631 */
7632static int hmR0VmxRunGuestCodeStep(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
7633{
7634 VMXTRANSIENT VmxTransient;
7635 VmxTransient.fUpdateTscOffsettingAndPreemptTimer = true;
7636 int rc = VERR_INTERNAL_ERROR_5;
7637 uint32_t cLoops = 0;
7638 uint16_t uCsStart = pCtx->cs.Sel;
7639 uint64_t uRipStart = pCtx->rip;
7640
7641 for (;; cLoops++)
7642 {
7643 Assert(!HMR0SuspendPending());
7644 HMVMX_ASSERT_CPU_SAFE();
7645
7646 /* Preparatory work for running guest code, this may return to ring-3 for some last minute updates. */
7647 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatEntry, x);
7648 rc = hmR0VmxPreRunGuest(pVM, pVCpu, pCtx, &VmxTransient);
7649 if (rc != VINF_SUCCESS)
7650 break;
7651
7652 /*
7653 * No longjmps to ring-3 from this point on!!!
7654 * Asserts() will still longjmp to ring-3 (but won't return), which is intentional and better than a kernel panic.
7655 * This also disables flushing of the R0-logger instance (if any).
7656 */
7657 VMMRZCallRing3Disable(pVCpu);
7658 hmR0VmxPreRunGuestCommitted(pVM, pVCpu, pCtx, &VmxTransient);
7659
7660 rc = hmR0VmxRunGuest(pVM, pVCpu, pCtx);
7661 /* The guest-CPU context is now outdated, 'pCtx' is to be treated as 'pMixedCtx' from this point on!!! */
7662
7663 /*
7664 * Restore any residual host-state and save any bits shared between host and guest into the guest-CPU state.
7665 * This will also re-enable longjmps to ring-3 when it has reached a safe point!!!
7666 */
7667 hmR0VmxPostRunGuest(pVM, pVCpu, pCtx, &VmxTransient, rc);
7668 if (RT_UNLIKELY(rc != VINF_SUCCESS)) /* Check for errors with running the VM (VMLAUNCH/VMRESUME). */
7669 {
7670 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit1, x);
7671 hmR0VmxReportWorldSwitchError(pVM, pVCpu, rc, pCtx, &VmxTransient);
7672 return rc;
7673 }
7674
7675 /* Handle the VM-exit. */
7676 AssertMsg(VmxTransient.uExitReason <= VMX_EXIT_MAX, ("%#x\n", VmxTransient.uExitReason));
7677 STAM_COUNTER_INC(&pVCpu->hm.s.paStatExitReasonR0[VmxTransient.uExitReason & MASK_EXITREASON_STAT]);
7678 STAM_PROFILE_ADV_STOP_START(&pVCpu->hm.s.StatExit1, &pVCpu->hm.s.StatExit2, x);
7679 HMVMX_START_EXIT_DISPATCH_PROF();
7680#ifdef HMVMX_USE_FUNCTION_TABLE
7681 rc = g_apfnVMExitHandlers[VmxTransient.uExitReason](pVCpu, pCtx, &VmxTransient);
7682#else
7683 rc = hmR0VmxHandleExit(pVCpu, pCtx, &VmxTransient, VmxTransient.uExitReason);
7684#endif
7685 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExit2, x);
7686 if (rc != VINF_SUCCESS)
7687 break;
7688 else if (cLoops > pVM->hm.s.cMaxResumeLoops)
7689 {
7690 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMaxResume);
7691 rc = VINF_EM_RAW_INTERRUPT;
7692 break;
7693 }
7694
7695 /*
7696 * Did the RIP change? If so, consider it a single step.
7697 * Otherwise, make sure one of the TFs gets set.
7698 */
7699 int rc2 = hmR0VmxSaveGuestRip(pVCpu, pCtx);
7700 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pCtx);
7701 AssertRCReturn(rc2, rc2);
7702 if ( pCtx->rip != uRipStart
7703 || pCtx->cs.Sel != uCsStart)
7704 {
7705 rc = VINF_EM_DBG_STEPPED;
7706 break;
7707 }
7708 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
7709 }
7710
7711 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatEntry, x);
7712 return rc;
7713}
7714
7715
7716/**
7717 * Runs the guest code using VT-x.
7718 *
7719 * @returns VBox status code.
7720 * @param pVM Pointer to the VM.
7721 * @param pVCpu Pointer to the VMCPU.
7722 * @param pCtx Pointer to the guest-CPU context.
7723 *
7724 * @remarks Called with preemption disabled.
7725 */
7726VMMR0DECL(int) VMXR0RunGuestCode(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
7727{
7728 Assert(VMMRZCallRing3IsEnabled(pVCpu));
7729 Assert(pVCpu->hm.s.vmx.fUpdatedGuestState == HMVMX_UPDATED_GUEST_ALL);
7730 HMVMX_ASSERT_PREEMPT_SAFE();
7731
7732 VMMRZCallRing3SetNotification(pVCpu, hmR0VmxCallRing3Callback, pCtx);
7733
7734 int rc;
7735 if (!pVCpu->hm.s.fSingleInstruction && !DBGFIsStepping(pVCpu))
7736 rc = hmR0VmxRunGuestCodeNormal(pVM, pVCpu, pCtx);
7737 else
7738 rc = hmR0VmxRunGuestCodeStep(pVM, pVCpu, pCtx);
7739
7740 if (rc == VERR_EM_INTERPRETER)
7741 rc = VINF_EM_RAW_EMULATE_INSTR;
7742 else if (rc == VINF_EM_RESET)
7743 rc = VINF_EM_TRIPLE_FAULT;
7744
7745 hmR0VmxExitToRing3(pVM, pVCpu, pCtx, rc);
7746 VMMRZCallRing3RemoveNotification(pVCpu);
7747 return rc;
7748}
7749
7750
7751#ifndef HMVMX_USE_FUNCTION_TABLE
7752DECLINLINE(int) hmR0VmxHandleExit(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient, uint32_t rcReason)
7753{
7754 int rc;
7755 switch (rcReason)
7756 {
7757 case VMX_EXIT_EPT_MISCONFIG: rc = hmR0VmxExitEptMisconfig(pVCpu, pMixedCtx, pVmxTransient); break;
7758 case VMX_EXIT_EPT_VIOLATION: rc = hmR0VmxExitEptViolation(pVCpu, pMixedCtx, pVmxTransient); break;
7759 case VMX_EXIT_IO_INSTR: rc = hmR0VmxExitIoInstr(pVCpu, pMixedCtx, pVmxTransient); break;
7760 case VMX_EXIT_CPUID: rc = hmR0VmxExitCpuid(pVCpu, pMixedCtx, pVmxTransient); break;
7761 case VMX_EXIT_RDTSC: rc = hmR0VmxExitRdtsc(pVCpu, pMixedCtx, pVmxTransient); break;
7762 case VMX_EXIT_RDTSCP: rc = hmR0VmxExitRdtscp(pVCpu, pMixedCtx, pVmxTransient); break;
7763 case VMX_EXIT_APIC_ACCESS: rc = hmR0VmxExitApicAccess(pVCpu, pMixedCtx, pVmxTransient); break;
7764 case VMX_EXIT_XCPT_OR_NMI: rc = hmR0VmxExitXcptOrNmi(pVCpu, pMixedCtx, pVmxTransient); break;
7765 case VMX_EXIT_MOV_CRX: rc = hmR0VmxExitMovCRx(pVCpu, pMixedCtx, pVmxTransient); break;
7766 case VMX_EXIT_EXT_INT: rc = hmR0VmxExitExtInt(pVCpu, pMixedCtx, pVmxTransient); break;
7767 case VMX_EXIT_INT_WINDOW: rc = hmR0VmxExitIntWindow(pVCpu, pMixedCtx, pVmxTransient); break;
7768 case VMX_EXIT_MWAIT: rc = hmR0VmxExitMwait(pVCpu, pMixedCtx, pVmxTransient); break;
7769 case VMX_EXIT_MONITOR: rc = hmR0VmxExitMonitor(pVCpu, pMixedCtx, pVmxTransient); break;
7770 case VMX_EXIT_TASK_SWITCH: rc = hmR0VmxExitTaskSwitch(pVCpu, pMixedCtx, pVmxTransient); break;
7771 case VMX_EXIT_PREEMPT_TIMER: rc = hmR0VmxExitPreemptTimer(pVCpu, pMixedCtx, pVmxTransient); break;
7772 case VMX_EXIT_RDMSR: rc = hmR0VmxExitRdmsr(pVCpu, pMixedCtx, pVmxTransient); break;
7773 case VMX_EXIT_WRMSR: rc = hmR0VmxExitWrmsr(pVCpu, pMixedCtx, pVmxTransient); break;
7774 case VMX_EXIT_MOV_DRX: rc = hmR0VmxExitMovDRx(pVCpu, pMixedCtx, pVmxTransient); break;
7775 case VMX_EXIT_TPR_BELOW_THRESHOLD: rc = hmR0VmxExitTprBelowThreshold(pVCpu, pMixedCtx, pVmxTransient); break;
7776 case VMX_EXIT_HLT: rc = hmR0VmxExitHlt(pVCpu, pMixedCtx, pVmxTransient); break;
7777 case VMX_EXIT_INVD: rc = hmR0VmxExitInvd(pVCpu, pMixedCtx, pVmxTransient); break;
7778 case VMX_EXIT_INVLPG: rc = hmR0VmxExitInvlpg(pVCpu, pMixedCtx, pVmxTransient); break;
7779 case VMX_EXIT_RSM: rc = hmR0VmxExitRsm(pVCpu, pMixedCtx, pVmxTransient); break;
7780 case VMX_EXIT_MTF: rc = hmR0VmxExitMtf(pVCpu, pMixedCtx, pVmxTransient); break;
7781 case VMX_EXIT_PAUSE: rc = hmR0VmxExitPause(pVCpu, pMixedCtx, pVmxTransient); break;
7782 case VMX_EXIT_XDTR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
7783 case VMX_EXIT_TR_ACCESS: rc = hmR0VmxExitXdtrAccess(pVCpu, pMixedCtx, pVmxTransient); break;
7784 case VMX_EXIT_WBINVD: rc = hmR0VmxExitWbinvd(pVCpu, pMixedCtx, pVmxTransient); break;
7785 case VMX_EXIT_XSETBV: rc = hmR0VmxExitXsetbv(pVCpu, pMixedCtx, pVmxTransient); break;
7786 case VMX_EXIT_RDRAND: rc = hmR0VmxExitRdrand(pVCpu, pMixedCtx, pVmxTransient); break;
7787 case VMX_EXIT_INVPCID: rc = hmR0VmxExitInvpcid(pVCpu, pMixedCtx, pVmxTransient); break;
7788 case VMX_EXIT_GETSEC: rc = hmR0VmxExitGetsec(pVCpu, pMixedCtx, pVmxTransient); break;
7789 case VMX_EXIT_RDPMC: rc = hmR0VmxExitRdpmc(pVCpu, pMixedCtx, pVmxTransient); break;
7790
7791 case VMX_EXIT_TRIPLE_FAULT: rc = hmR0VmxExitTripleFault(pVCpu, pMixedCtx, pVmxTransient); break;
7792 case VMX_EXIT_NMI_WINDOW: rc = hmR0VmxExitNmiWindow(pVCpu, pMixedCtx, pVmxTransient); break;
7793 case VMX_EXIT_INIT_SIGNAL: rc = hmR0VmxExitInitSignal(pVCpu, pMixedCtx, pVmxTransient); break;
7794 case VMX_EXIT_SIPI: rc = hmR0VmxExitSipi(pVCpu, pMixedCtx, pVmxTransient); break;
7795 case VMX_EXIT_IO_SMI: rc = hmR0VmxExitIoSmi(pVCpu, pMixedCtx, pVmxTransient); break;
7796 case VMX_EXIT_SMI: rc = hmR0VmxExitSmi(pVCpu, pMixedCtx, pVmxTransient); break;
7797 case VMX_EXIT_ERR_MSR_LOAD: rc = hmR0VmxExitErrMsrLoad(pVCpu, pMixedCtx, pVmxTransient); break;
7798 case VMX_EXIT_ERR_INVALID_GUEST_STATE: rc = hmR0VmxExitErrInvalidGuestState(pVCpu, pMixedCtx, pVmxTransient); break;
7799 case VMX_EXIT_ERR_MACHINE_CHECK: rc = hmR0VmxExitErrMachineCheck(pVCpu, pMixedCtx, pVmxTransient); break;
7800
7801 case VMX_EXIT_VMCALL:
7802 case VMX_EXIT_VMCLEAR:
7803 case VMX_EXIT_VMLAUNCH:
7804 case VMX_EXIT_VMPTRLD:
7805 case VMX_EXIT_VMPTRST:
7806 case VMX_EXIT_VMREAD:
7807 case VMX_EXIT_VMRESUME:
7808 case VMX_EXIT_VMWRITE:
7809 case VMX_EXIT_VMXOFF:
7810 case VMX_EXIT_VMXON:
7811 case VMX_EXIT_INVEPT:
7812 case VMX_EXIT_INVVPID:
7813 case VMX_EXIT_VMFUNC:
7814 rc = hmR0VmxExitSetPendingXcptUD(pVCpu, pMixedCtx, pVmxTransient);
7815 break;
7816 default:
7817 rc = hmR0VmxExitErrUndefined(pVCpu, pMixedCtx, pVmxTransient);
7818 break;
7819 }
7820 return rc;
7821}
7822#endif
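/* When HMVMX_USE_FUNCTION_TABLE is defined (the default) the run loops instead index
   g_apfnVMExitHandlers[uExitReason] directly; the switch above is the fallback dispatcher
   and mirrors that table, so the two must be kept in sync. */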
7823
7824#ifdef DEBUG
7825 /* Is there some generic IPRT define for this that is not in Runtime/internal/\* ?? */
7826# define HMVMX_ASSERT_PREEMPT_CPUID_VAR() \
7827 RTCPUID const idAssertCpu = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId()
7828
7829# define HMVMX_ASSERT_PREEMPT_CPUID() \
7830 do \
7831 { \
7832 RTCPUID const idAssertCpuNow = RTThreadPreemptIsEnabled(NIL_RTTHREAD) ? NIL_RTCPUID : RTMpCpuId(); \
7833 AssertMsg(idAssertCpu == idAssertCpuNow, ("VMX %#x, %#x\n", idAssertCpu, idAssertCpuNow)); \
7834 } while (0)
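/* Usage sketch: HMVMX_ASSERT_PREEMPT_CPUID_VAR() declares idAssertCpu near the start of a
   scope, and HMVMX_ASSERT_PREEMPT_CPUID() later asserts that we are still on that CPU (or
   that preemption was enabled all along); see HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() below. */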
7835
7836# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() \
7837 do { \
7838 AssertPtr(pVCpu); \
7839 AssertPtr(pMixedCtx); \
7840 AssertPtr(pVmxTransient); \
7841 Assert(pVmxTransient->fVMEntryFailed == false); \
7842 Assert(ASMIntAreEnabled()); \
7843 HMVMX_ASSERT_PREEMPT_SAFE(); \
7844 HMVMX_ASSERT_PREEMPT_CPUID_VAR(); \
7845 Log4Func(("vcpu[%RU32] -v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v-v\n", pVCpu->idCpu)); \
7846 HMVMX_ASSERT_PREEMPT_SAFE(); \
7847 if (VMMR0IsLogFlushDisabled(pVCpu)) \
7848 HMVMX_ASSERT_PREEMPT_CPUID(); \
7849 HMVMX_STOP_EXIT_DISPATCH_PROF(); \
7850 } while (0)
7851
7852# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() \
7853 do { \
7854 Log4Func(("\n")); \
7855 } while(0)
7856#else /* Release builds */
7857# define HMVMX_VALIDATE_EXIT_HANDLER_PARAMS() do { HMVMX_STOP_EXIT_DISPATCH_PROF(); } while(0)
7858# define HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS() do { } while(0)
7859#endif
7860
7861
7862/**
7863 * Advances the guest RIP after reading it from the VMCS.
7864 *
7865 * @returns VBox status code.
7866 * @param pVCpu Pointer to the VMCPU.
7867 * @param pMixedCtx Pointer to the guest-CPU context. The data may be
7868 * out-of-sync. Make sure to update the required fields
7869 * before using them.
7870 * @param pVmxTransient Pointer to the VMX transient structure.
7871 *
7872 * @remarks No-long-jump zone!!!
7873 */
7874DECLINLINE(int) hmR0VmxAdvanceGuestRip(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
7875{
7876 int rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
7877 rc |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
7878 AssertRCReturn(rc, rc);
7879
7880 pMixedCtx->rip += pVmxTransient->cbInstr;
7881 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
7882 return rc;
7883}
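/* Sketch of how a typical instruction VM-exit handler uses this helper; the handler name is
   hypothetical, see the real ones following the "VM-exit handlers" marker further down:
       HMVMX_EXIT_DECL hmR0VmxExitSomeInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
       {
           HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
           // ... emulate the instruction using pMixedCtx ...
           return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
       }
*/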
7884
7885
7886/**
7887 * Tries to determine what part of the guest-state VT-x has deemed invalid
7888 * and updates the error record fields accordingly.
7889 *
7890 * @returns VMX_IGS_* return codes.
7891 * @retval VMX_IGS_REASON_NOT_FOUND if this function could not find anything
7892 * wrong with the guest state.
7893 *
7894 * @param pVM Pointer to the VM.
7895 * @param pVCpu Pointer to the VMCPU.
7896 * @param pCtx Pointer to the guest-CPU state.
7897 */
7898static uint32_t hmR0VmxCheckGuestState(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx)
7899{
7900#define HMVMX_ERROR_BREAK(err) { uError = (err); break; }
7901#define HMVMX_CHECK_BREAK(expr, err) if (!(expr)) { \
7902 uError = (err); \
7903 break; \
7904 } else do {} while (0)
7905/* Duplicate of IEM_IS_CANONICAL(). */
7906#define HMVMX_IS_CANONICAL(a_u64Addr) ((uint64_t)(a_u64Addr) + UINT64_C(0x800000000000) < UINT64_C(0x1000000000000))
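/* E.g. 0xffff800000000000 + 0x800000000000 wraps to 0, which is < 2^48, so it is canonical;
   the non-canonical 0x0000800000000000 sums to exactly 2^48 and fails the check. */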
7907
7908 int rc;
7909 uint64_t u64Val;
7910 uint32_t u32Val;
7911 uint32_t uError = VMX_IGS_ERROR;
7912 bool fUnrestrictedGuest = pVM->hm.s.vmx.fUnrestrictedGuest;
7913
7914 do
7915 {
7916 /*
7917 * CR0.
7918 */
7919 uint32_t uSetCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 & pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
7920 uint32_t uZapCR0 = (uint32_t)(pVM->hm.s.vmx.msr.vmx_cr0_fixed0 | pVM->hm.s.vmx.msr.vmx_cr0_fixed1);
7921 /* Exceptions for unrestricted-guests for fixed CR0 bits (PE, PG).
7922 See Intel spec. 26.3.1 "Checks on Guest Control Registers, Debug Registers and MSRs." */
7923 if (fUnrestrictedGuest)
7924 uSetCR0 &= ~(X86_CR0_PE | X86_CR0_PG);
7925
7926 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val);
7927 AssertRCBreak(rc);
7928 HMVMX_CHECK_BREAK((u32Val & uSetCR0) == uSetCR0, VMX_IGS_CR0_FIXED1);
7929 HMVMX_CHECK_BREAK(!(u32Val & ~uZapCR0), VMX_IGS_CR0_FIXED0);
7930 if ( !fUnrestrictedGuest
7931 && (u32Val & X86_CR0_PG)
7932 && !(u32Val & X86_CR0_PE))
7933 {
7934 HMVMX_ERROR_BREAK(VMX_IGS_CR0_PG_PE_COMBO);
7935 }
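 /* As a worked example (values vary per CPU): with the common IA32_VMX_CR0_FIXED0 = 0x80000021
    and IA32_VMX_CR0_FIXED1 = 0xffffffff, uSetCR0 = 0x80000021 (PE, NE and PG must be 1) and
    uZapCR0 = 0xffffffff (no bit is forced to 0). */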
7936
7937 /*
7938 * CR4.
7939 */
7940 uint64_t uSetCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 & pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
7941 uint64_t uZapCR4 = (pVM->hm.s.vmx.msr.vmx_cr4_fixed0 | pVM->hm.s.vmx.msr.vmx_cr4_fixed1);
7942 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR4, &u32Val);
7943 AssertRCBreak(rc);
7944 HMVMX_CHECK_BREAK((u32Val & uSetCR4) == uSetCR4, VMX_IGS_CR4_FIXED1);
7945 HMVMX_CHECK_BREAK(!(u32Val & ~uZapCR4), VMX_IGS_CR4_FIXED0);
7946
7947 /*
7948 * IA32_DEBUGCTL MSR.
7949 */
7950 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_DEBUGCTL_FULL, &u64Val);
7951 AssertRCBreak(rc);
7952 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
7953 && (u64Val & 0xfffffe3c)) /* Bits 31:9, bits 5:2 MBZ. */
7954 {
7955 HMVMX_ERROR_BREAK(VMX_IGS_DEBUGCTL_MSR_RESERVED);
7956 }
7957 uint64_t u64DebugCtlMsr = u64Val;
7958
7959#ifdef VBOX_STRICT
7960 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, &u32Val);
7961 AssertRCBreak(rc);
7962 Assert(u32Val == pVCpu->hm.s.vmx.u32ProcCtls);
7963#endif
7964 bool const fLongModeGuest = !!(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST);
7965
7966 /*
7967 * RIP and RFLAGS.
7968 */
7969 uint32_t u32Eflags;
7970#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
7971 if (HMVMX_IS_64BIT_HOST_MODE())
7972 {
7973 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64Val);
7974 AssertRCBreak(rc);
7975 /* pCtx->rip can be different from the one in the VMCS (e.g. run guest code and VM-exits that don't update it). */
7976 if ( !fLongModeGuest
7977 || !pCtx->cs.Attr.n.u1Long)
7978 {
7979 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffff00000000)), VMX_IGS_LONGMODE_RIP_INVALID);
7980 }
7981 /** @todo If the processor supports N < 64 linear-address bits, bits 63:N
7982 * must be identical if the "IA32e mode guest" VM-entry control is 1
7983 * and CS.L is 1. No check applies if the CPU supports 64
7984 * linear-address bits. */
7985
7986 /* Flags in pCtx can be different (real-on-v86 for instance). We are only concerned about the VMCS contents here. */
7987 rc = VMXReadVmcs64(VMX_VMCS_GUEST_RFLAGS, &u64Val);
7988 AssertRCBreak(rc);
7989 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffc08028)), /* Bit 63:22, Bit 15, 5, 3 MBZ. */
7990 VMX_IGS_RFLAGS_RESERVED);
7991 HMVMX_CHECK_BREAK((u64Val & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
7992 u32Eflags = u64Val;
7993 }
7994 else
7995#endif
7996 {
7997 rc = VMXReadVmcs32(VMX_VMCS_GUEST_RFLAGS, &u32Eflags);
7998 AssertRCBreak(rc);
7999 HMVMX_CHECK_BREAK(!(u32Eflags & 0xffc08028), VMX_IGS_RFLAGS_RESERVED); /* Bit 31:22, Bit 15, 5, 3 MBZ. */
8000 HMVMX_CHECK_BREAK((u32Eflags & X86_EFL_RA1_MASK), VMX_IGS_RFLAGS_RESERVED1); /* Bit 1 MB1. */
8001 }
8002
8003 if ( fLongModeGuest
8004 || !(pCtx->cr0 & X86_CR0_PE))
8005 {
8006 HMVMX_CHECK_BREAK(!(u32Eflags & X86_EFL_VM), VMX_IGS_RFLAGS_VM_INVALID);
8007 }
8008
8009 uint32_t u32EntryInfo;
8010 rc = VMXReadVmcs32(VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO, &u32EntryInfo);
8011 AssertRCBreak(rc);
8012 if ( VMX_ENTRY_INTERRUPTION_INFO_VALID(u32EntryInfo)
8013 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
8014 {
8015 HMVMX_CHECK_BREAK(u32Eflags & X86_EFL_IF, VMX_IGS_RFLAGS_IF_INVALID);
8016 }
8017
8018 /*
8019 * 64-bit checks.
8020 */
8021#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
8022 if (HMVMX_IS_64BIT_HOST_MODE())
8023 {
8024 if ( fLongModeGuest
8025 && !fUnrestrictedGuest)
8026 {
8027 HMVMX_CHECK_BREAK(CPUMIsGuestPagingEnabledEx(pCtx), VMX_IGS_CR0_PG_LONGMODE);
8028 HMVMX_CHECK_BREAK((pCtx->cr4 & X86_CR4_PAE), VMX_IGS_CR4_PAE_LONGMODE);
8029 }
8030
8031 if ( !fLongModeGuest
8032 && (pCtx->cr4 & X86_CR4_PCIDE))
8033 {
8034 HMVMX_ERROR_BREAK(VMX_IGS_CR4_PCIDE);
8035 }
8036
8037 /** @todo CR3 field must be such that bits 63:52 and bits in the range
8038 * 51:32 beyond the processor's physical-address width are 0. */
8039
8040 if ( (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG)
8041 && (pCtx->dr[7] & X86_DR7_MBZ_MASK))
8042 {
8043 HMVMX_ERROR_BREAK(VMX_IGS_DR7_RESERVED);
8044 }
8045
8046 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_ESP, &u64Val);
8047 AssertRCBreak(rc);
8048 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_ESP_NOT_CANONICAL);
8049
8050 rc = VMXReadVmcs64(VMX_VMCS_HOST_SYSENTER_EIP, &u64Val);
8051 AssertRCBreak(rc);
8052 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(u64Val), VMX_IGS_SYSENTER_EIP_NOT_CANONICAL);
8053 }
8054#endif
8055
8056 /*
8057 * PERF_GLOBAL MSR.
8058 */
8059 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR)
8060 {
8061 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL, &u64Val);
8062 AssertRCBreak(rc);
8063 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffff8fffffffc)),
8064 VMX_IGS_PERF_GLOBAL_MSR_RESERVED); /* Bits 63:35, bits 31:2 MBZ. */
8065 }
8066
8067 /*
8068 * PAT MSR.
8069 */
8070 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR)
8071 {
8072 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_PAT_FULL, &u64Val);
8073 AssertRCBreak(rc);
8074 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xf8f8f8f8f8f8f8f8)), VMX_IGS_PAT_MSR_RESERVED); /* Bits 7:3 of each byte MBZ. */
8075 for (unsigned i = 0; i < 8; i++)
8076 {
8077 uint8_t u8Val = (u64Val & 0xff);
8078 if ( u8Val != 0 /* UC */
8079 && u8Val != 1 /* WC */
8080 && u8Val != 4 /* WT */
8081 && u8Val != 5 /* WP */
8082 && u8Val != 6 /* WB */
8083 && u8Val != 7 /* UC- */)
8084 {
8085 HMVMX_ERROR_BREAK(VMX_IGS_PAT_MSR_INVALID);
8086 }
8087 u64Val >>= 8;
8088 }
8089 }
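 /* For reference, the architectural power-on PAT value 0x0007040600070406 passes this
    check: its bytes decode to the valid types WB, WT, UC- and UC only. */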
8090
8091 /*
8092 * EFER MSR.
8093 */
8094 if (pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR)
8095 {
8096 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_EFER_FULL, &u64Val);
8097 AssertRCBreak(rc);
8098 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xfffffffffffff2fe)),
8099 VMX_IGS_EFER_MSR_RESERVED); /* Bits 63:12, bit 9, bits 7:1 MBZ. */
8100 HMVMX_CHECK_BREAK(!!(u64Val & MSR_K6_EFER_LMA) == !!(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST),
8101 VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH);
8102 HMVMX_CHECK_BREAK( fUnrestrictedGuest
8103 || !!(u64Val & MSR_K6_EFER_LMA) == !!(pCtx->cr0 & X86_CR0_PG), VMX_IGS_EFER_LMA_PG_MISMATCH);
8104 }
8105
8106 /*
8107 * Segment registers.
8108 */
8109 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
8110 || !(pCtx->ldtr.Sel & X86_SEL_LDT), VMX_IGS_LDTR_TI_INVALID);
8111 if (!(u32Eflags & X86_EFL_VM))
8112 {
8113 /* CS */
8114 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1Present, VMX_IGS_CS_ATTR_P_INVALID);
8115 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xf00), VMX_IGS_CS_ATTR_RESERVED);
8116 HMVMX_CHECK_BREAK(!(pCtx->cs.Attr.u & 0xfffe0000), VMX_IGS_CS_ATTR_RESERVED);
8117 HMVMX_CHECK_BREAK( (pCtx->cs.u32Limit & 0xfff) == 0xfff
8118 || !(pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
8119 HMVMX_CHECK_BREAK( !(pCtx->cs.u32Limit & 0xfff00000)
8120 || (pCtx->cs.Attr.n.u1Granularity), VMX_IGS_CS_ATTR_G_INVALID);
8121 /* CS cannot be loaded with NULL in protected mode. */
8122 HMVMX_CHECK_BREAK(pCtx->cs.Attr.u && !(pCtx->cs.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_CS_ATTR_UNUSABLE);
8123 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u1DescType, VMX_IGS_CS_ATTR_S_INVALID);
8124 if (pCtx->cs.Attr.n.u4Type == 9 || pCtx->cs.Attr.n.u4Type == 11)
8125 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL);
8126 else if (pCtx->cs.Attr.n.u4Type == 13 || pCtx->cs.Attr.n.u4Type == 15)
8127 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl <= pCtx->ss.Attr.n.u2Dpl, VMX_IGS_CS_SS_ATTR_DPL_MISMATCH);
8128 else if (pVM->hm.s.vmx.fUnrestrictedGuest && pCtx->cs.Attr.n.u4Type == 3)
8129 HMVMX_CHECK_BREAK(pCtx->cs.Attr.n.u2Dpl == 0, VMX_IGS_CS_ATTR_DPL_INVALID);
8130 else
8131 HMVMX_ERROR_BREAK(VMX_IGS_CS_ATTR_TYPE_INVALID);
8132
8133 /* SS */
8134 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
8135 || (pCtx->ss.Sel & X86_SEL_RPL) == (pCtx->cs.Sel & X86_SEL_RPL), VMX_IGS_SS_CS_RPL_UNEQUAL);
8136 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u2Dpl == (pCtx->ss.Sel & X86_SEL_RPL), VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL);
8137 if ( !(pCtx->cr0 & X86_CR0_PE)
8138 || pCtx->cs.Attr.n.u4Type == 3)
8139 {
8140 HMVMX_CHECK_BREAK(!pCtx->ss.Attr.n.u2Dpl, VMX_IGS_SS_ATTR_DPL_INVALID);
8141 }
8142 if (!(pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE))
8143 {
8144 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u4Type == 3 || pCtx->ss.Attr.n.u4Type == 7, VMX_IGS_SS_ATTR_TYPE_INVALID);
8145 HMVMX_CHECK_BREAK(pCtx->ss.Attr.n.u1Present, VMX_IGS_SS_ATTR_P_INVALID);
8146 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xf00), VMX_IGS_SS_ATTR_RESERVED);
8147 HMVMX_CHECK_BREAK(!(pCtx->ss.Attr.u & 0xfffe0000), VMX_IGS_SS_ATTR_RESERVED);
8148 HMVMX_CHECK_BREAK( (pCtx->ss.u32Limit & 0xfff) == 0xfff
8149 || !(pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
8150 HMVMX_CHECK_BREAK( !(pCtx->ss.u32Limit & 0xfff00000)
8151 || (pCtx->ss.Attr.n.u1Granularity), VMX_IGS_SS_ATTR_G_INVALID);
8152 }
8153
8154 /* DS, ES, FS, GS - only check for usable selectors, see hmR0VmxWriteSegmentReg(). */
8155 if (!(pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE))
8156 {
8157 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_DS_ATTR_A_INVALID);
8158 HMVMX_CHECK_BREAK(pCtx->ds.Attr.n.u1Present, VMX_IGS_DS_ATTR_P_INVALID);
8159 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
8160 || pCtx->ds.Attr.n.u4Type > 11
8161 || pCtx->ds.Attr.n.u2Dpl >= (pCtx->ds.Sel & X86_SEL_RPL), VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL);
8162 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xf00), VMX_IGS_DS_ATTR_RESERVED);
8163 HMVMX_CHECK_BREAK(!(pCtx->ds.Attr.u & 0xfffe0000), VMX_IGS_DS_ATTR_RESERVED);
8164 HMVMX_CHECK_BREAK( (pCtx->ds.u32Limit & 0xfff) == 0xfff
8165 || !(pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
8166 HMVMX_CHECK_BREAK( !(pCtx->ds.u32Limit & 0xfff00000)
8167 || (pCtx->ds.Attr.n.u1Granularity), VMX_IGS_DS_ATTR_G_INVALID);
8168 HMVMX_CHECK_BREAK( !(pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_CODE)
8169 || (pCtx->ds.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_DS_ATTR_TYPE_INVALID);
8170 }
8171 if (!(pCtx->es.Attr.u & X86DESCATTR_UNUSABLE))
8172 {
8173 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_ES_ATTR_A_INVALID);
8174 HMVMX_CHECK_BREAK(pCtx->es.Attr.n.u1Present, VMX_IGS_ES_ATTR_P_INVALID);
8175 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
8176 || pCtx->es.Attr.n.u4Type > 11
8177 || pCtx->es.Attr.n.u2Dpl >= (pCtx->es.Sel & X86_SEL_RPL), VMX_IGS_ES_ATTR_DPL_RPL_UNEQUAL);
8178 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xf00), VMX_IGS_ES_ATTR_RESERVED);
8179 HMVMX_CHECK_BREAK(!(pCtx->es.Attr.u & 0xfffe0000), VMX_IGS_ES_ATTR_RESERVED);
8180 HMVMX_CHECK_BREAK( (pCtx->es.u32Limit & 0xfff) == 0xfff
8181 || !(pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
8182 HMVMX_CHECK_BREAK( !(pCtx->es.u32Limit & 0xfff00000)
8183 || (pCtx->es.Attr.n.u1Granularity), VMX_IGS_ES_ATTR_G_INVALID);
8184 HMVMX_CHECK_BREAK( !(pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_CODE)
8185 || (pCtx->es.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_ES_ATTR_TYPE_INVALID);
8186 }
8187 if (!(pCtx->fs.Attr.u & X86DESCATTR_UNUSABLE))
8188 {
8189 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_FS_ATTR_A_INVALID);
8190 HMVMX_CHECK_BREAK(pCtx->fs.Attr.n.u1Present, VMX_IGS_FS_ATTR_P_INVALID);
8191 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
8192 || pCtx->fs.Attr.n.u4Type > 11
8193 || pCtx->fs.Attr.n.u2Dpl >= (pCtx->fs.Sel & X86_SEL_RPL), VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL);
8194 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xf00), VMX_IGS_FS_ATTR_RESERVED);
8195 HMVMX_CHECK_BREAK(!(pCtx->fs.Attr.u & 0xfffe0000), VMX_IGS_FS_ATTR_RESERVED);
8196 HMVMX_CHECK_BREAK( (pCtx->fs.u32Limit & 0xfff) == 0xfff
8197 || !(pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
8198 HMVMX_CHECK_BREAK( !(pCtx->fs.u32Limit & 0xfff00000)
8199 || (pCtx->fs.Attr.n.u1Granularity), VMX_IGS_FS_ATTR_G_INVALID);
8200 HMVMX_CHECK_BREAK( !(pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
8201 || (pCtx->fs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_FS_ATTR_TYPE_INVALID);
8202 }
8203 if (!(pCtx->gs.Attr.u & X86DESCATTR_UNUSABLE))
8204 {
8205 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_ACCESSED, VMX_IGS_GS_ATTR_A_INVALID);
8206 HMVMX_CHECK_BREAK(pCtx->gs.Attr.n.u1Present, VMX_IGS_GS_ATTR_P_INVALID);
8207 HMVMX_CHECK_BREAK( pVM->hm.s.vmx.fUnrestrictedGuest
8208 || pCtx->gs.Attr.n.u4Type > 11
8209 || pCtx->gs.Attr.n.u2Dpl >= (pCtx->gs.Sel & X86_SEL_RPL), VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL);
8210 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xf00), VMX_IGS_GS_ATTR_RESERVED);
8211 HMVMX_CHECK_BREAK(!(pCtx->gs.Attr.u & 0xfffe0000), VMX_IGS_GS_ATTR_RESERVED);
8212 HMVMX_CHECK_BREAK( (pCtx->gs.u32Limit & 0xfff) == 0xfff
8213 || !(pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
8214 HMVMX_CHECK_BREAK( !(pCtx->gs.u32Limit & 0xfff00000)
8215 || (pCtx->gs.Attr.n.u1Granularity), VMX_IGS_GS_ATTR_G_INVALID);
8216 HMVMX_CHECK_BREAK( !(pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_CODE)
8217 || (pCtx->gs.Attr.n.u4Type & X86_SEL_TYPE_READ), VMX_IGS_GS_ATTR_TYPE_INVALID);
8218 }
8219 /* 64-bit capable CPUs. */
8220#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
8221 if (HMVMX_IS_64BIT_HOST_MODE())
8222 {
8223 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
8224 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
8225 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
8226 || HMVMX_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
8227 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
8228 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
8229 VMX_IGS_LONGMODE_SS_BASE_INVALID);
8230 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
8231 VMX_IGS_LONGMODE_DS_BASE_INVALID);
8232 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
8233 VMX_IGS_LONGMODE_ES_BASE_INVALID);
8234 }
8235#endif
8236 }
8237 else
8238 {
8239 /* V86 mode checks. */
8240 uint32_t u32CSAttr, u32SSAttr, u32DSAttr, u32ESAttr, u32FSAttr, u32GSAttr;
8241 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
8242 {
8243 u32CSAttr = 0xf3; u32SSAttr = 0xf3;
8244 u32DSAttr = 0xf3; u32ESAttr = 0xf3;
8245 u32FSAttr = 0xf3; u32GSAttr = 0xf3;
8246 }
8247 else
8248 {
8249 u32CSAttr = pCtx->cs.Attr.u; u32SSAttr = pCtx->ss.Attr.u;
8250 u32DSAttr = pCtx->ds.Attr.u; u32ESAttr = pCtx->es.Attr.u;
8251 u32FSAttr = pCtx->fs.Attr.u; u32GSAttr = pCtx->gs.Attr.u;
8252 }
8253
8254 /* CS */
8255 HMVMX_CHECK_BREAK((pCtx->cs.u64Base == (uint64_t)pCtx->cs.Sel << 4), VMX_IGS_V86_CS_BASE_INVALID);
8256 HMVMX_CHECK_BREAK(pCtx->cs.u32Limit == 0xffff, VMX_IGS_V86_CS_LIMIT_INVALID);
8257 HMVMX_CHECK_BREAK(u32CSAttr == 0xf3, VMX_IGS_V86_CS_ATTR_INVALID);
8258 /* SS */
8259 HMVMX_CHECK_BREAK((pCtx->ss.u64Base == (uint64_t)pCtx->ss.Sel << 4), VMX_IGS_V86_SS_BASE_INVALID);
8260 HMVMX_CHECK_BREAK(pCtx->ss.u32Limit == 0xffff, VMX_IGS_V86_SS_LIMIT_INVALID);
8261 HMVMX_CHECK_BREAK(u32SSAttr == 0xf3, VMX_IGS_V86_SS_ATTR_INVALID);
8262 /* DS */
8263 HMVMX_CHECK_BREAK((pCtx->ds.u64Base == (uint64_t)pCtx->ds.Sel << 4), VMX_IGS_V86_DS_BASE_INVALID);
8264 HMVMX_CHECK_BREAK(pCtx->ds.u32Limit == 0xffff, VMX_IGS_V86_DS_LIMIT_INVALID);
8265 HMVMX_CHECK_BREAK(u32DSAttr == 0xf3, VMX_IGS_V86_DS_ATTR_INVALID);
8266 /* ES */
8267 HMVMX_CHECK_BREAK((pCtx->es.u64Base == (uint64_t)pCtx->es.Sel << 4), VMX_IGS_V86_ES_BASE_INVALID);
8268 HMVMX_CHECK_BREAK(pCtx->es.u32Limit == 0xffff, VMX_IGS_V86_ES_LIMIT_INVALID);
8269 HMVMX_CHECK_BREAK(u32ESAttr == 0xf3, VMX_IGS_V86_ES_ATTR_INVALID);
8270 /* FS */
8271 HMVMX_CHECK_BREAK((pCtx->fs.u64Base == (uint64_t)pCtx->fs.Sel << 4), VMX_IGS_V86_FS_BASE_INVALID);
8272 HMVMX_CHECK_BREAK(pCtx->fs.u32Limit == 0xffff, VMX_IGS_V86_FS_LIMIT_INVALID);
8273 HMVMX_CHECK_BREAK(u32FSAttr == 0xf3, VMX_IGS_V86_FS_ATTR_INVALID);
8274 /* GS */
8275 HMVMX_CHECK_BREAK((pCtx->gs.u64Base == (uint64_t)pCtx->gs.Sel << 4), VMX_IGS_V86_GS_BASE_INVALID);
8276 HMVMX_CHECK_BREAK(pCtx->gs.u32Limit == 0xffff, VMX_IGS_V86_GS_LIMIT_INVALID);
8277 HMVMX_CHECK_BREAK(u32GSAttr == 0xf3, VMX_IGS_V86_GS_ATTR_INVALID);
8278 /* 64-bit capable CPUs. */
8279#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
8280 if (HMVMX_IS_64BIT_HOST_MODE())
8281 {
8282 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(pCtx->fs.u64Base), VMX_IGS_FS_BASE_NOT_CANONICAL);
8283 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(pCtx->gs.u64Base), VMX_IGS_GS_BASE_NOT_CANONICAL);
8284 HMVMX_CHECK_BREAK( (pCtx->ldtr.Attr.u & X86DESCATTR_UNUSABLE)
8285 || HMVMX_IS_CANONICAL(pCtx->ldtr.u64Base), VMX_IGS_LDTR_BASE_NOT_CANONICAL);
8286 HMVMX_CHECK_BREAK(!(pCtx->cs.u64Base >> 32), VMX_IGS_LONGMODE_CS_BASE_INVALID);
8287 HMVMX_CHECK_BREAK((pCtx->ss.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ss.u64Base >> 32),
8288 VMX_IGS_LONGMODE_SS_BASE_INVALID);
8289 HMVMX_CHECK_BREAK((pCtx->ds.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->ds.u64Base >> 32),
8290 VMX_IGS_LONGMODE_DS_BASE_INVALID);
8291 HMVMX_CHECK_BREAK((pCtx->es.Attr.u & X86DESCATTR_UNUSABLE) || !(pCtx->es.u64Base >> 32),
8292 VMX_IGS_LONGMODE_ES_BASE_INVALID);
8293 }
8294#endif
8295 }
8296
8297 /*
8298 * TR.
8299 */
8300 HMVMX_CHECK_BREAK(!(pCtx->tr.Sel & X86_SEL_LDT), VMX_IGS_TR_TI_INVALID);
8301 /* 64-bit capable CPUs. */
8302#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
8303 if (HMVMX_IS_64BIT_HOST_MODE())
8304 {
8305 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(pCtx->tr.u64Base), VMX_IGS_TR_BASE_NOT_CANONICAL);
8306 }
8307#endif
8308 if (fLongModeGuest)
8309 {
8310 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u4Type == 11, /* 64-bit busy TSS. */
8311 VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID);
8312 }
8313 else
8314 {
8315 HMVMX_CHECK_BREAK( pCtx->tr.Attr.n.u4Type == 3 /* 16-bit busy TSS. */
8316 || pCtx->tr.Attr.n.u4Type == 11, /* 32-bit busy TSS.*/
8317 VMX_IGS_TR_ATTR_TYPE_INVALID);
8318 }
8319 HMVMX_CHECK_BREAK(!pCtx->tr.Attr.n.u1DescType, VMX_IGS_TR_ATTR_S_INVALID);
8320 HMVMX_CHECK_BREAK(pCtx->tr.Attr.n.u1Present, VMX_IGS_TR_ATTR_P_INVALID);
8321 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & 0xf00), VMX_IGS_TR_ATTR_RESERVED); /* Bits 11:8 MBZ. */
8322 HMVMX_CHECK_BREAK( (pCtx->tr.u32Limit & 0xfff) == 0xfff
8323 || !(pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
8324 HMVMX_CHECK_BREAK( !(pCtx->tr.u32Limit & 0xfff00000)
8325 || (pCtx->tr.Attr.n.u1Granularity), VMX_IGS_TR_ATTR_G_INVALID);
8326 HMVMX_CHECK_BREAK(!(pCtx->tr.Attr.u & X86DESCATTR_UNUSABLE), VMX_IGS_TR_ATTR_UNUSABLE);
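 /* Concrete example: a valid 32-bit busy TSS has pCtx->tr.Attr.u = 0x8b, i.e. type 11,
    S=0 (system descriptor) and P=1, which satisfies all of the checks above. */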
8327
8328 /*
8329 * GDTR and IDTR.
8330 */
8331#if HC_ARCH_BITS == 64 || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
8332 if (HMVMX_IS_64BIT_HOST_MODE())
8333 {
8334 rc = VMXReadVmcs64(VMX_VMCS_GUEST_GDTR_BASE, &u64Val);
8335 AssertRCBreak(rc);
8336 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(u64Val), VMX_IGS_GDTR_BASE_NOT_CANONICAL);
8337
8338 rc = VMXReadVmcs64(VMX_VMCS_GUEST_IDTR_BASE, &u64Val);
8339 AssertRCBreak(rc);
8340 HMVMX_CHECK_BREAK(HMVMX_IS_CANONICAL(u64Val), VMX_IGS_IDTR_BASE_NOT_CANONICAL);
8341 }
8342#endif
8343
8344 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_GDTR_LIMIT, &u32Val);
8345 AssertRCBreak(rc);
8346 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_GDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
8347
8348 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_IDTR_LIMIT, &u32Val);
8349 AssertRCBreak(rc);
8350 HMVMX_CHECK_BREAK(!(u32Val & 0xffff0000), VMX_IGS_IDTR_LIMIT_INVALID); /* Bits 31:16 MBZ. */
8351
8352 /*
8353 * Guest Non-Register State.
8354 */
8355 /* Activity State. */
8356 uint32_t u32ActivityState;
8357 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_ACTIVITY_STATE, &u32ActivityState);
8358 AssertRCBreak(rc);
8359 HMVMX_CHECK_BREAK( !u32ActivityState
8360 || (u32ActivityState & MSR_IA32_VMX_MISC_ACTIVITY_STATES(pVM->hm.s.vmx.msr.vmx_misc)),
8361 VMX_IGS_ACTIVITY_STATE_INVALID);
8362 HMVMX_CHECK_BREAK( !(pCtx->ss.Attr.n.u2Dpl)
8363 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_HLT, VMX_IGS_ACTIVITY_STATE_HLT_INVALID);
8364 uint32_t u32IntrState;
8365 rc = VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &u32IntrState);
8366 AssertRCBreak(rc);
8367 if ( u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS
8368 || u32IntrState == VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
8369 {
8370 HMVMX_CHECK_BREAK(u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_ACTIVE, VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID);
8371 }
8372
8373 /** @todo Activity state and injecting interrupts. Left as a todo since we
8374 * currently don't use any activity state but ACTIVE. */
8375
8376 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
8377 || u32ActivityState != VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT, VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID);
8378
8379 /* Guest interruptibility-state. */
8380 HMVMX_CHECK_BREAK(!(u32IntrState & 0xfffffff0), VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED);
8381 HMVMX_CHECK_BREAK((u32IntrState & ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
8382 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS))
8383 != ( VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
8384 | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
8385 VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID);
8386 HMVMX_CHECK_BREAK( (u32Eflags & X86_EFL_IF)
8387 || !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
8388 VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID);
8389 if (VMX_ENTRY_INTERRUPTION_INFO_VALID(u32EntryInfo))
8390 {
8391 if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT)
8392 {
8393 HMVMX_CHECK_BREAK( !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
8394 && !(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
8395 VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID);
8396 }
8397 else if (VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
8398 {
8399 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS),
8400 VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID);
8401 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI),
8402 VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID);
8403 }
8404 }
8405 /** @todo Assumes the processor is not in SMM. */
8406 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
8407 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID);
8408 HMVMX_CHECK_BREAK( !(pVCpu->hm.s.vmx.u32EntryCtls & VMX_VMCS_CTRL_ENTRY_ENTRY_SMM)
8409 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI),
8410 VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID);
8411 if ( (pVCpu->hm.s.vmx.u32PinCtls & VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI)
8412 && VMX_ENTRY_INTERRUPTION_INFO_VALID(u32EntryInfo)
8413 && VMX_ENTRY_INTERRUPTION_INFO_TYPE(u32EntryInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
8414 {
8415 HMVMX_CHECK_BREAK(!(u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI),
8416 VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID);
8417 }
8418
8419 /* Pending debug exceptions. */
8420 if (HMVMX_IS_64BIT_HOST_MODE())
8421 {
8422 rc = VMXReadVmcs64(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u64Val);
8423 AssertRCBreak(rc);
8424 /* Bits 63:15, Bit 13, Bits 11:4 MBZ. */
8425 HMVMX_CHECK_BREAK(!(u64Val & UINT64_C(0xffffffffffffaff0)), VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED);
8426 u32Val = u64Val; /* For pending debug exceptions checks below. */
8427 }
8428 else
8429 {
8430 rc = VMXReadVmcs32(VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS, &u32Val);
8431 AssertRCBreak(rc);
8432 /* Bits 31:15, Bit 13, Bits 11:4 MBZ. */
8433 HMVMX_CHECK_BREAK(!(u32Val & 0xffffaff0), VMX_IGS_PENDING_DEBUG_RESERVED);
8434 }
8435
8436 if ( (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI)
8437 || (u32IntrState & VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)
8438 || u32ActivityState == VMX_VMCS_GUEST_ACTIVITY_HLT)
8439 {
8440 if ( (u32Eflags & X86_EFL_TF)
8441 && !(u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
8442 {
8443 /* Bit 14 is PendingDebug.BS. */
8444 HMVMX_CHECK_BREAK(u32Val & RT_BIT(14), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET);
8445 }
8446 if ( !(u32Eflags & X86_EFL_TF)
8447 || (u64DebugCtlMsr & RT_BIT_64(1))) /* Bit 1 is IA32_DEBUGCTL.BTF. */
8448 {
8449 /* Bit 14 is PendingDebug.BS. */
8450 HMVMX_CHECK_BREAK(!(u32Val & RT_BIT(14)), VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR);
8451 }
8452 }
8453
8454 /* VMCS link pointer. */
8455 rc = VMXReadVmcs64(VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL, &u64Val);
8456 AssertRCBreak(rc);
8457 if (u64Val != UINT64_C(0xffffffffffffffff))
8458 {
8459 HMVMX_CHECK_BREAK(!(u64Val & 0xfff), VMX_IGS_VMCS_LINK_PTR_RESERVED);
8460 /** @todo Bits beyond the processor's physical-address width MBZ. */
8461 /** @todo The 32-bit value located in memory referenced by the value of this
8462 * field (as a physical address) must contain the processor's VMCS revision ID. */
8463 /** @todo SMM checks. */
8464 }
8465
8466 /** @todo Checks on Guest Page-Directory-Pointer-Table Entries. */
8467
8468 /* Shouldn't happen but distinguish it from AssertRCBreak() errors. */
8469 if (uError == VMX_IGS_ERROR)
8470 uError = VMX_IGS_REASON_NOT_FOUND;
8471 } while (0);
8472
8473 pVCpu->hm.s.u32HMError = uError;
8474 return uError;
8475
8476#undef HMVMX_ERROR_BREAK
8477#undef HMVMX_CHECK_BREAK
8478#undef HMVMX_IS_CANONICAL
8479}
8480
8481/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8482/* -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- VM-exit handlers -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- */
8483/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= */
8484
8485/** @name VM-exit handlers.
8486 * @{
8487 */
8488
8489/**
8490 * VM-exit handler for external interrupts (VMX_EXIT_EXT_INT).
8491 */
8492HMVMX_EXIT_DECL hmR0VmxExitExtInt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8493{
8494 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8495 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitExtInt);
8496 /* 32-bit Windows hosts (4 cores) have trouble with this; it causes higher interrupt latency. */
8497#if HC_ARCH_BITS == 64
8498 Assert(ASMIntAreEnabled());
8499 if (pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUsePreemptTimer)
8500 return VINF_SUCCESS;
8501#endif
8502 return VINF_EM_RAW_INTERRUPT;
8503}
8504
8505
8506/**
8507 * VM-exit handler for exceptions or NMIs (VMX_EXIT_XCPT_OR_NMI).
8508 */
8509HMVMX_EXIT_DECL hmR0VmxExitXcptOrNmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8510{
8511 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8512 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitXcptNmi, y3);
8513
8514 int rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
8515 AssertRCReturn(rc, rc);
8516
8517 uint32_t uIntrType = VMX_EXIT_INTERRUPTION_INFO_TYPE(pVmxTransient->uExitIntrInfo);
8518 Assert( !(pVCpu->hm.s.vmx.u32ExitCtls & VMX_VMCS_CTRL_EXIT_ACK_EXT_INT)
8519 && uIntrType != VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT);
8520 Assert(VMX_EXIT_INTERRUPTION_INFO_IS_VALID(pVmxTransient->uExitIntrInfo));
8521
8522 if (uIntrType == VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI)
8523 {
8524 /*
8525 * This cannot be a guest NMI as the only way for the guest to receive an NMI is if we injected it ourselves,
8526 * and anything we inject is not going to cause a VM-exit directly for the event being injected.
8527 * See Intel spec. 27.2.3 "Information for VM Exits During Event Delivery".
8528 *
8529 * Dispatch the NMI to the host. See Intel spec. 27.5.5 "Updating Non-Register State".
8530 */
8531 VMXDispatchHostNmi();
8532 STAM_REL_COUNTER_INC(&pVCpu->hm.s.StatExitHostNmi);
8533 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
8534 return VINF_SUCCESS;
8535 }
8536
8537 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
8538 rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
8539 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
8540 {
8541 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
8542 return VINF_SUCCESS;
8543 }
8544 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
8545 {
8546 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
8547 return rc;
8548 }
8549
8550 uint32_t uExitIntrInfo = pVmxTransient->uExitIntrInfo;
8551 uint32_t uVector = VMX_EXIT_INTERRUPTION_INFO_VECTOR(uExitIntrInfo);
8552 switch (uIntrType)
8553 {
8554 case VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT: /* Software exception. (#BP or #OF) */
8555 Assert(uVector == X86_XCPT_DB || uVector == X86_XCPT_BP || uVector == X86_XCPT_OF);
8556 /* no break */
8557 case VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT:
8558 {
8559 switch (uVector)
8560 {
8561 case X86_XCPT_PF: rc = hmR0VmxExitXcptPF(pVCpu, pMixedCtx, pVmxTransient); break;
8562 case X86_XCPT_GP: rc = hmR0VmxExitXcptGP(pVCpu, pMixedCtx, pVmxTransient); break;
8563 case X86_XCPT_NM: rc = hmR0VmxExitXcptNM(pVCpu, pMixedCtx, pVmxTransient); break;
8564 case X86_XCPT_MF: rc = hmR0VmxExitXcptMF(pVCpu, pMixedCtx, pVmxTransient); break;
8565 case X86_XCPT_DB: rc = hmR0VmxExitXcptDB(pVCpu, pMixedCtx, pVmxTransient); break;
8566 case X86_XCPT_BP: rc = hmR0VmxExitXcptBP(pVCpu, pMixedCtx, pVmxTransient); break;
8567#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
8568 case X86_XCPT_XF: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXF);
8569 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
8570 case X86_XCPT_DE: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDE);
8571 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
8572 case X86_XCPT_UD: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestUD);
8573 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
8574 case X86_XCPT_SS: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestSS);
8575 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
8576 case X86_XCPT_NP: STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNP);
8577 rc = hmR0VmxExitXcptGeneric(pVCpu, pMixedCtx, pVmxTransient); break;
8578#endif
8579 default:
8580 {
8581 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8582 AssertRCReturn(rc, rc);
8583
8584 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestXcpUnk);
8585 if (pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
8586 {
8587 Assert(pVCpu->CTX_SUFF(pVM)->hm.s.vmx.pRealModeTSS);
8588 Assert(PDMVmmDevHeapIsEnabled(pVCpu->CTX_SUFF(pVM)));
8589 rc = hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
8590 rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
8591 AssertRCReturn(rc, rc);
8592 hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(uExitIntrInfo),
8593 pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode,
8594 0 /* GCPtrFaultAddress */);
8595 AssertRCReturn(rc, rc);
8596 }
8597 else
8598 {
8599 AssertMsgFailed(("Unexpected VM-exit caused by exception %#x\n", uVector));
8600 pVCpu->hm.s.u32HMError = uVector;
8601 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
8602 }
8603 break;
8604 }
8605 }
8606 break;
8607 }
8608
8609 default:
8610 {
8611 pVCpu->hm.s.u32HMError = uExitIntrInfo;
8612 rc = VERR_VMX_UNEXPECTED_INTERRUPTION_EXIT_CODE;
8613 AssertMsgFailed(("Unexpected interruption code %#x\n", VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntrInfo)));
8614 break;
8615 }
8616 }
8617 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitXcptNmi, y3);
8618 return rc;
8619}
8620
8621
8622/**
8623 * VM-exit handler for interrupt-window exiting (VMX_EXIT_INT_WINDOW).
8624 */
8625HMVMX_EXIT_DECL hmR0VmxExitIntWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8626{
8627 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8628
8629 /* Indicate that we no longer need to VM-exit when the guest is ready to receive interrupts; it is now ready. */
8630 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT);
8631 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT;
8632 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
8633 AssertRCReturn(rc, rc);
8634
8635 /* Deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and resume guest execution. */
8636 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIntWindow);
8637 return VINF_SUCCESS;
8638}
8639
8640
8641/**
8642 * VM-exit handler for NMI-window exiting (VMX_EXIT_NMI_WINDOW).
8643 */
8644HMVMX_EXIT_DECL hmR0VmxExitNmiWindow(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8645{
8646 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8647 AssertMsgFailed(("Unexpected NMI-window exit.\n"));
8648 pVCpu->hm.s.u32HMError = VMX_EXIT_NMI_WINDOW;
8649 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8650}
8651
8652
8653/**
8654 * VM-exit handler for WBINVD (VMX_EXIT_WBINVD). Conditional VM-exit.
8655 */
8656HMVMX_EXIT_DECL hmR0VmxExitWbinvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8657{
8658 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8659 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWbinvd);
8660 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8661}
8662
8663
8664/**
8665 * VM-exit handler for INVD (VMX_EXIT_INVD). Unconditional VM-exit.
8666 */
8667HMVMX_EXIT_DECL hmR0VmxExitInvd(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8668{
8669 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8670 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvd);
8671 return hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8672}
8673
8674
8675/**
8676 * VM-exit handler for CPUID (VMX_EXIT_CPUID). Unconditional VM-exit.
8677 */
8678HMVMX_EXIT_DECL hmR0VmxExitCpuid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8679{
8680 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8681 PVM pVM = pVCpu->CTX_SUFF(pVM);
8682 int rc = EMInterpretCpuId(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8683 if (RT_LIKELY(rc == VINF_SUCCESS))
8684 {
8685 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8686 Assert(pVmxTransient->cbInstr == 2);
8687 }
8688 else
8689 {
8690 AssertMsgFailed(("hmR0VmxExitCpuid: EMInterpretCpuId failed with %Rrc\n", rc));
8691 rc = VERR_EM_INTERPRETER;
8692 }
8693 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCpuid);
8694 return rc;
8695}
8696
8697
8698/**
8699 * VM-exit handler for GETSEC (VMX_EXIT_GETSEC). Unconditional VM-exit.
8700 */
8701HMVMX_EXIT_DECL hmR0VmxExitGetsec(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8702{
8703 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8704 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx);
8705 AssertRCReturn(rc, rc);
8706
8707 if (pMixedCtx->cr4 & X86_CR4_SMXE)
8708 return VINF_EM_RAW_EMULATE_INSTR;
8709
8710 AssertMsgFailed(("hmR0VmxExitGetsec: unexpected VM-exit when CR4.SMXE is 0.\n"));
8711 pVCpu->hm.s.u32HMError = VMX_EXIT_GETSEC;
8712 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8713}
8714
8715
8716/**
8717 * VM-exit handler for RDTSC (VMX_EXIT_RDTSC). Conditional VM-exit.
8718 */
8719HMVMX_EXIT_DECL hmR0VmxExitRdtsc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8720{
8721 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8722 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
8723 AssertRCReturn(rc, rc);
8724
8725 PVM pVM = pVCpu->CTX_SUFF(pVM);
8726 rc = EMInterpretRdtsc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8727 if (RT_LIKELY(rc == VINF_SUCCESS))
8728 {
8729 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8730 Assert(pVmxTransient->cbInstr == 2);
8731 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
8732 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
8733 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
8734 }
8735 else
8736 {
8737 AssertMsgFailed(("hmR0VmxExitRdtsc: EMInterpretRdtsc failed with %Rrc\n", rc));
8738 rc = VERR_EM_INTERPRETER;
8739 }
8740 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
8741 return rc;
8742}
8743
8744
8745/**
8746 * VM-exit handler for RDTSCP (VMX_EXIT_RDTSCP). Conditional VM-exit.
8747 */
8748HMVMX_EXIT_DECL hmR0VmxExitRdtscp(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8749{
8750 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8751 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
8752 rc |= hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx); /* For MSR_K8_TSC_AUX */
8753 AssertRCReturn(rc, rc);
8754
8755 PVM pVM = pVCpu->CTX_SUFF(pVM);
8756 rc = EMInterpretRdtscp(pVM, pVCpu, pMixedCtx);
8757 if (RT_LIKELY(rc == VINF_SUCCESS))
8758 {
8759 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8760 Assert(pVmxTransient->cbInstr == 3);
8761 /* If we get a spurious VM-exit when offsetting is enabled, we must reset offsetting on VM-reentry. See @bugref{6634}. */
8762 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING)
8763 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
8764 }
8765 else
8766 {
8767 AssertMsgFailed(("hmR0VmxExitRdtscp: EMInterpretRdtscp failed with %Rrc\n", rc));
8768 rc = VERR_EM_INTERPRETER;
8769 }
8770 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdtsc);
8771 return rc;
8772}
8773
8774
8775/**
8776 * VM-exit handler for RDPMC (VMX_EXIT_RDPMC). Conditional VM-exit.
8777 */
8778HMVMX_EXIT_DECL hmR0VmxExitRdpmc(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8779{
8780 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8781 int rc = hmR0VmxSaveGuestCR4(pVCpu, pMixedCtx); /** @todo review if CR4 is really required by EM. */
8782 rc |= hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx); /** @todo review if CR0 is really required by EM. */
8783 AssertRCReturn(rc, rc);
8784
8785 PVM pVM = pVCpu->CTX_SUFF(pVM);
8786 rc = EMInterpretRdpmc(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8787 if (RT_LIKELY(rc == VINF_SUCCESS))
8788 {
8789 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8790 Assert(pVmxTransient->cbInstr == 2);
8791 }
8792 else
8793 {
8794 AssertMsgFailed(("hmR0VmxExitRdpmc: EMInterpretRdpmc failed with %Rrc\n", rc));
8795 rc = VERR_EM_INTERPRETER;
8796 }
8797 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdpmc);
8798 return rc;
8799}
8800
8801
8802/**
8803 * VM-exit handler for INVLPG (VMX_EXIT_INVLPG). Conditional VM-exit.
8804 */
8805HMVMX_EXIT_DECL hmR0VmxExitInvlpg(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8806{
8807 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8808 PVM pVM = pVCpu->CTX_SUFF(pVM);
8809 Assert(!pVM->hm.s.fNestedPaging);
8810
8811 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
8812 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
8813 AssertRCReturn(rc, rc);
8814
8815 VBOXSTRICTRC rc2 = EMInterpretInvlpg(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), pVmxTransient->uExitQualification);
8816 rc = VBOXSTRICTRC_VAL(rc2);
8817 if (RT_LIKELY(rc == VINF_SUCCESS))
8818 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8819 else
8820 {
8821 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitInvlpg: EMInterpretInvlpg %#RX64 failed with %Rrc\n",
8822 pVmxTransient->uExitQualification, rc));
8823 }
8824 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInvlpg);
8825 return rc;
8826}
8827
8828
8829/**
8830 * VM-exit handler for MONITOR (VMX_EXIT_MONITOR). Conditional VM-exit.
8831 */
8832HMVMX_EXIT_DECL hmR0VmxExitMonitor(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8833{
8834 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8835 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8836 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8837 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8838 AssertRCReturn(rc, rc);
8839
8840 PVM pVM = pVCpu->CTX_SUFF(pVM);
8841 rc = EMInterpretMonitor(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8842 if (RT_LIKELY(rc == VINF_SUCCESS))
8843 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8844 else
8845 {
8846 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMonitor: EMInterpretMonitor failed with %Rrc\n", rc));
8847 rc = VERR_EM_INTERPRETER;
8848 }
8849 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMonitor);
8850 return rc;
8851}
8852
8853
8854/**
8855 * VM-exit handler for MWAIT (VMX_EXIT_MWAIT). Conditional VM-exit.
8856 */
8857HMVMX_EXIT_DECL hmR0VmxExitMwait(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8858{
8859 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8860 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
8861 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8862 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
8863 AssertRCReturn(rc, rc);
8864
8865 PVM pVM = pVCpu->CTX_SUFF(pVM);
8866 VBOXSTRICTRC rc2 = EMInterpretMWait(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
8867 rc = VBOXSTRICTRC_VAL(rc2);
8868 if (RT_LIKELY( rc == VINF_SUCCESS
8869 || rc == VINF_EM_HALT))
8870 {
8871 int rc3 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
8872 AssertRCReturn(rc3, rc3);
8873
8874 if ( rc == VINF_EM_HALT
8875 && EMShouldContinueAfterHalt(pVCpu, pMixedCtx))
8876 {
8877 rc = VINF_SUCCESS;
8878 }
8879 }
8880 else
8881 {
8882 AssertMsg(rc == VERR_EM_INTERPRETER, ("hmR0VmxExitMwait: EMInterpretMWait failed with %Rrc\n", rc));
8883 rc = VERR_EM_INTERPRETER;
8884 }
8885 AssertMsg(rc == VINF_SUCCESS || rc == VINF_EM_HALT || rc == VERR_EM_INTERPRETER,
8886 ("hmR0VmxExitMwait: failed, invalid error code %Rrc\n", rc));
8887 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMwait);
8888 return rc;
8889}
8890
8891
8892/**
8893 * VM-exit handler for RSM (VMX_EXIT_RSM). Unconditional VM-exit.
8894 */
8895HMVMX_EXIT_DECL hmR0VmxExitRsm(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8896{
8897 /*
8898 * Execution of RSM outside of SMM mode causes #UD regardless of VMX root or VMX non-root mode. In theory, we should never
8899 * get this VM-exit. This can happen only if dual-monitor treatment of SMI and VMX is enabled, which can (only?) be done by
8900 * executing VMCALL in VMX root operation. If we get here, something funny is going on.
8901 * See Intel spec. 33.15.5 "Enabling the Dual-Monitor Treatment".
8902 */
8903 AssertMsgFailed(("Unexpected RSM VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8904 pVCpu->hm.s.u32HMError = VMX_EXIT_RSM;
8905 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8906}
8907
8908
8909/**
8910 * VM-exit handler for SMI (VMX_EXIT_SMI). Unconditional VM-exit.
8911 */
8912HMVMX_EXIT_DECL hmR0VmxExitSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8913{
8914 /*
8915 * This can only happen if we support dual-monitor treatment of SMI, which can be activated by executing VMCALL in VMX
8916 * root operation. Only an STM (SMM transfer monitor) would get this exit when we (the executive monitor) execute a VMCALL
8917 * in VMX root mode or receive an SMI. If we get here, something funny is going on.
8918 * See Intel spec. 33.15.6 "Activating the Dual-Monitor Treatment" and Intel spec. 25.3 "Other Causes of VM-Exits".
8919 */
8920 AssertMsgFailed(("Unexpected SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8921 pVCpu->hm.s.u32HMError = VMX_EXIT_SMI;
8922 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8923}
8924
8925
8926/**
8927 * VM-exit handler for IO SMI (VMX_EXIT_IO_SMI). Unconditional VM-exit.
8928 */
8929HMVMX_EXIT_DECL hmR0VmxExitIoSmi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8930{
8931 /* Same treatment as VMX_EXIT_SMI. See comment in hmR0VmxExitSmi(). */
8932 AssertMsgFailed(("Unexpected IO SMI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8933 pVCpu->hm.s.u32HMError = VMX_EXIT_IO_SMI;
8934 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8935}
8936
8937
8938/**
8939 * VM-exit handler for SIPI (VMX_EXIT_SIPI). Conditional VM-exit.
8940 */
8941HMVMX_EXIT_DECL hmR0VmxExitSipi(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8942{
8943 /*
8944 * SIPI exits can only occur in VMX non-root operation when the "wait-for-SIPI" guest activity state is used. We currently
8945 * don't make use of it (see hmR0VmxLoadGuestActivityState()) as our guests don't have direct access to the host LAPIC.
8946 * See Intel spec. 25.3 "Other Causes of VM-exits".
8947 */
8948 AssertMsgFailed(("Unexpected SIPI VM-exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
8949 pVCpu->hm.s.u32HMError = VMX_EXIT_SIPI;
8950 return VERR_VMX_UNEXPECTED_EXIT_CODE;
8951}
8952
8953
8954/**
8955 * VM-exit handler for INIT signal (VMX_EXIT_INIT_SIGNAL). Unconditional
8956 * VM-exit.
8957 */
8958HMVMX_EXIT_DECL hmR0VmxExitInitSignal(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8959{
8960 /*
8961 * INIT signals are blocked in VMX root operation by VMXON and by SMI in SMM.
8962 * See Intel spec. 33.14.1 "Default Treatment of SMI Delivery" and Intel spec. 29.3 "VMX Instructions" for VMXON.
8963 *
8964 * It is -NOT- blocked in VMX non-root operation so we can, in theory, still get these VM-exits.
8965 * See Intel spec. 23.8 "Restrictions on VMX Operation".
8966 */
8967 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8968 return VINF_SUCCESS;
8969}
8970
8971
8972/**
8973 * VM-exit handler for triple faults (VMX_EXIT_TRIPLE_FAULT). Unconditional
8974 * VM-exit.
8975 */
8976HMVMX_EXIT_DECL hmR0VmxExitTripleFault(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8977{
8978 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8979 return VINF_EM_RESET;
8980}
8981
8982
8983/**
8984 * VM-exit handler for HLT (VMX_EXIT_HLT). Conditional VM-exit.
8985 */
8986HMVMX_EXIT_DECL hmR0VmxExitHlt(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
8987{
8988 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
8989 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT);
8990 int rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
8991 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
8992 AssertRCReturn(rc, rc);
8993
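/* HLT is a single-byte opcode (0xF4); advance RIP past it manually. */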
8994 pMixedCtx->rip++;
8995 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
8996 if (EMShouldContinueAfterHalt(pVCpu, pMixedCtx)) /* Requires eflags. */
8997 rc = VINF_SUCCESS;
8998 else
8999 rc = VINF_EM_HALT;
9000
9001 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
9002 return rc;
9003}
9004
9005
9006/**
9007 * VM-exit handler for instructions that result in a #UD exception delivered to
9008 * the guest.
9009 */
9010HMVMX_EXIT_DECL hmR0VmxExitSetPendingXcptUD(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9011{
9012 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9013 hmR0VmxSetPendingXcptUD(pVCpu, pMixedCtx);
9014 return VINF_SUCCESS;
9015}
9016
9017
9018/**
9019 * VM-exit handler for expiry of the VMX preemption timer.
9020 */
9021HMVMX_EXIT_DECL hmR0VmxExitPreemptTimer(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9022{
9023 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9024
9025 /* If the preemption-timer has expired, reinitialize the preemption timer on next VM-entry. */
9026 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
9027
9028 /* If there are any timer events pending, fall back to ring-3, otherwise resume guest execution. */
9029 PVM pVM = pVCpu->CTX_SUFF(pVM);
9030 bool fTimersPending = TMTimerPollBool(pVM, pVCpu);
9031 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPreemptTimer);
9032 return fTimersPending ? VINF_EM_RAW_TIMER_PENDING : VINF_SUCCESS;
9033}
9034
9035
9036/**
9037 * VM-exit handler for XSETBV (VMX_EXIT_XSETBV). Unconditional VM-exit.
9038 */
9039HMVMX_EXIT_DECL hmR0VmxExitXsetbv(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9040{
9041 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9042
9043 /* We expose XSETBV to the guest; fall back to the recompiler for emulation. */
9044 /** @todo check if XSETBV is supported by the recompiler. */
9045 return VERR_EM_INTERPRETER;
9046}
9047
9048
9049/**
9050 * VM-exit handler for INVPCID (VMX_EXIT_INVPCID). Conditional VM-exit.
9051 */
9052HMVMX_EXIT_DECL hmR0VmxExitInvpcid(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9053{
9054 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9055
9056 /* The guest should not invalidate the host CPU's TLBs; fall back to the recompiler. */
9057 /** @todo implement EMInterpretInvpcid() */
9058 return VERR_EM_INTERPRETER;
9059}
9060
9061
9062/**
9063 * VM-exit handler for invalid-guest-state (VMX_EXIT_ERR_INVALID_GUEST_STATE).
9064 * Error VM-exit.
9065 */
9066HMVMX_EXIT_DECL hmR0VmxExitErrInvalidGuestState(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9067{
9068 int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9069 AssertRCReturn(rc, rc);
9070
9071 uint32_t uInvalidReason = hmR0VmxCheckGuestState(pVCpu->CTX_SUFF(pVM), pVCpu, pMixedCtx);
9072 NOREF(uInvalidReason);
9073
9074#ifdef VBOX_STRICT
9075 uint32_t uIntrState;
9076 HMVMXHCUINTREG uHCReg;
9077 uint64_t u64Val;
9078 uint32_t u32Val;
9079
9080 rc = hmR0VmxReadEntryIntrInfoVmcs(pVmxTransient);
9081 rc |= hmR0VmxReadEntryXcptErrorCodeVmcs(pVmxTransient);
9082 rc |= hmR0VmxReadEntryInstrLenVmcs(pVCpu, pVmxTransient);
9083 rc |= VMXReadVmcs32(VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE, &uIntrState);
9084 AssertRCReturn(rc, rc);
9085
9086 Log4(("uInvalidReason %u\n", uInvalidReason));
9087 Log4(("VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO %#RX32\n", pVmxTransient->uEntryIntrInfo));
9088 Log4(("VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE %#RX32\n", pVmxTransient->uEntryXcptErrorCode));
9089 Log4(("VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH %#RX32\n", pVmxTransient->cbEntryInstr));
9090 Log4(("VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE %#RX32\n", uIntrState));
9091
9092 rc = VMXReadVmcs32(VMX_VMCS_GUEST_CR0, &u32Val); AssertRC(rc);
9093 Log4(("VMX_VMCS_GUEST_CR0 %#RX32\n", u32Val));
9094 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_MASK, &uHCReg); AssertRC(rc);
9095 Log4(("VMX_VMCS_CTRL_CR0_MASK %#RHr\n", uHCReg));
9096 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR0_READ_SHADOW, &uHCReg); AssertRC(rc);
9097 Log4(("VMX_VMCS_CTRL_CR0_READ_SHADOW %#RHr\n", uHCReg));
9098 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_MASK, &uHCReg); AssertRC(rc);
9099 Log4(("VMX_VMCS_CTRL_CR4_MASK %#RHr\n", uHCReg));
9100 rc = VMXReadVmcsHstN(VMX_VMCS_CTRL_CR4_READ_SHADOW, &uHCReg); AssertRC(rc);
9101 Log4(("VMX_VMCS_CTRL_CR4_READ_SHADOW %#RHr\n", uHCReg));
9102 rc = VMXReadVmcs64(VMX_VMCS64_CTRL_EPTP_FULL, &u64Val); AssertRC(rc);
9103 Log4(("VMX_VMCS64_CTRL_EPTP_FULL %#RX64\n", u64Val));
9104#endif
9105
9106 PVM pVM = pVCpu->CTX_SUFF(pVM);
9107 HMDumpRegs(pVM, pVCpu, pMixedCtx);
9108
9109 return VERR_VMX_INVALID_GUEST_STATE;
9110}
9111
9112
9113/**
9114 * VM-exit handler for VM-entry failure due to an MSR-load
9115 * (VMX_EXIT_ERR_MSR_LOAD). Error VM-exit.
9116 */
9117HMVMX_EXIT_DECL hmR0VmxExitErrMsrLoad(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9118{
9119 AssertMsgFailed(("Unexpected MSR-load exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
9120 return VERR_VMX_UNEXPECTED_EXIT_CODE;
9121}
9122
9123
9124/**
9125 * VM-exit handler for VM-entry failure due to a machine-check event
9126 * (VMX_EXIT_ERR_MACHINE_CHECK). Error VM-exit.
9127 */
9128HMVMX_EXIT_DECL hmR0VmxExitErrMachineCheck(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9129{
9130 AssertMsgFailed(("Unexpected machine-check event exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
9131 return VERR_VMX_UNEXPECTED_EXIT_CODE;
9132}
9133
9134
9135/**
9136 * VM-exit handler for all undefined reasons. Should never ever happen... in
9137 * theory.
9138 */
9139HMVMX_EXIT_DECL hmR0VmxExitErrUndefined(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9140{
9141 AssertMsgFailed(("Huh!? Undefined VM-exit reason %d. pVCpu=%p pMixedCtx=%p\n", pVmxTransient->uExitReason, pVCpu, pMixedCtx));
9142 return VERR_VMX_UNDEFINED_EXIT_CODE;
9143}
9144
9145
9146/**
9147 * VM-exit handler for XDTR (LGDT, SGDT, LIDT, SIDT) accesses
9148 * (VMX_EXIT_XDTR_ACCESS) and LDT and TR access (LLDT, LTR, SLDT, STR).
9149 * Conditional VM-exit.
9150 */
9151HMVMX_EXIT_DECL hmR0VmxExitXdtrAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9152{
9153 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9154
9155 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT. */
9156 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitXdtrAccess);
9157 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT)
9158 return VERR_EM_INTERPRETER;
9159 AssertMsgFailed(("Unexpected XDTR access. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
9160 return VERR_VMX_UNEXPECTED_EXIT_CODE;
9161}
9162
9163
9164/**
9165 * VM-exit handler for RDRAND (VMX_EXIT_RDRAND). Conditional VM-exit.
9166 */
9167HMVMX_EXIT_DECL hmR0VmxExitRdrand(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9168{
9169 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9170
9171 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT. */
9172 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdrand);
9173 if (pVCpu->hm.s.vmx.u32ProcCtls2 & VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT)
9174 return VERR_EM_INTERPRETER;
9175 AssertMsgFailed(("Unexpected RDRAND exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
9176 return VERR_VMX_UNEXPECTED_EXIT_CODE;
9177}
9178
9179
9180/**
9181 * VM-exit handler for RDMSR (VMX_EXIT_RDMSR).
9182 */
9183HMVMX_EXIT_DECL hmR0VmxExitRdmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9184{
9185 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9186
9187 /* EMInterpretRdmsr() requires CR0, Eflags and SS segment register. */
9188 int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
9189 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
9190 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9191 AssertRCReturn(rc, rc);
9192 Log4(("CS:RIP=%04x:%#RX64 ECX=%X\n", pMixedCtx->cs.Sel, pMixedCtx->rip, pMixedCtx->ecx));
9193
9194 PVM pVM = pVCpu->CTX_SUFF(pVM);
9195 rc = EMInterpretRdmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
9196 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER,
9197 ("hmR0VmxExitRdmsr: failed, invalid error code %Rrc\n", rc));
9198 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitRdmsr);
9199
9200 if (RT_LIKELY(rc == VINF_SUCCESS))
9201 {
9202 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9203 Assert(pVmxTransient->cbInstr == 2);
9204 }
9205 return rc;
9206}
9207
9208
9209/**
9210 * VM-exit handler for WRMSR (VMX_EXIT_WRMSR).
9211 */
9212HMVMX_EXIT_DECL hmR0VmxExitWrmsr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9213{
9214 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9215 PVM pVM = pVCpu->CTX_SUFF(pVM);
9216 int rc = VINF_SUCCESS;
9217
9218 /* EMInterpretWrmsr() requires CR0, EFLAGS and SS segment register. */
9219 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
9220 rc |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx);
9221 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9222 AssertRCReturn(rc, rc);
9223 Log4(("ecx=%#RX32\n", pMixedCtx->ecx));
9224
9225 rc = EMInterpretWrmsr(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
9226 AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER, ("hmR0VmxExitWrmsr: failed, invalid error code %Rrc\n", rc));
9227 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitWrmsr);
9228
9229 if (RT_LIKELY(rc == VINF_SUCCESS))
9230 {
9231 rc = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9232
9233 /* If this is an X2APIC WRMSR access, update the APIC state as well. */
9234 if ( pMixedCtx->ecx >= MSR_IA32_X2APIC_START
9235 && pMixedCtx->ecx <= MSR_IA32_X2APIC_END)
9236 {
9237 /* We've already saved the APIC related guest-state (TPR) in hmR0VmxPostRunGuest(). When full APIC register
9238 * virtualization is implemented, we'll have to make sure APIC state is saved from the VMCS before
9239 * EMInterpretWrmsr() changes it. */
9240 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
9241 }
9242 else if (pMixedCtx->ecx == MSR_K6_EFER) /* EFER is the only MSR we auto-load but don't allow write-passthrough. */
9243 {
9244 rc = hmR0VmxSaveGuestAutoLoadStoreMsrs(pVCpu, pMixedCtx);
9245 AssertRCReturn(rc, rc);
9246 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS;
9247 }
9248 else if (pMixedCtx->ecx == MSR_IA32_TSC) /* Windows 7 does this during bootup. See @bugref{6398}. */
9249 pVmxTransient->fUpdateTscOffsettingAndPreemptTimer = true;
9250
9251 /* Update MSRs that are part of the VMCS when MSR-bitmaps are not supported. */
9252 if (!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS))
9253 {
9254 switch (pMixedCtx->ecx)
9255 {
9256 case MSR_IA32_SYSENTER_CS: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_CS_MSR; break;
9257 case MSR_IA32_SYSENTER_EIP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_EIP_MSR; break;
9258 case MSR_IA32_SYSENTER_ESP: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SYSENTER_ESP_MSR; break;
9259 case MSR_K8_FS_BASE: /* no break */
9260 case MSR_K8_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_SEGMENT_REGS; break;
9261 case MSR_K8_KERNEL_GS_BASE: pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_AUTO_MSRS; break;
9262 }
9263 }
9264#ifdef VBOX_STRICT
9265 else
9266 {
9267 /* Paranoia. Validate that MSRs in the MSR-bitmaps with write-passthru are not intercepted. */
9268 switch (pMixedCtx->ecx)
9269 {
9270 case MSR_IA32_SYSENTER_CS:
9271 case MSR_IA32_SYSENTER_EIP:
9272 case MSR_IA32_SYSENTER_ESP:
9273 case MSR_K8_FS_BASE:
9274 case MSR_K8_GS_BASE:
9275 {
9276 AssertMsgFailed(("Unexpected WRMSR for an MSR in the VMCS. ecx=%#RX32\n", pMixedCtx->ecx));
9277 return VERR_VMX_UNEXPECTED_EXIT_CODE;
9278 }
9279
9280 case MSR_K8_LSTAR:
9281 case MSR_K6_STAR:
9282 case MSR_K8_SF_MASK:
9283 case MSR_K8_TSC_AUX:
9284 case MSR_K8_KERNEL_GS_BASE:
9285 {
9286 AssertMsgFailed(("Unexpected WRMSR for an MSR in the auto-load/store area in the VMCS. ecx=%#RX32\n",
9287 pMixedCtx->ecx));
9288 return VERR_VMX_UNEXPECTED_EXIT_CODE;
9289 }
9290 }
9291 }
9292#endif /* VBOX_STRICT */
9293 }
9294 return rc;
9295}
9296
9297
9298/**
9299 * VM-exit handler for PAUSE (VMX_EXIT_PAUSE). Conditional VM-exit.
9300 */
9301HMVMX_EXIT_DECL hmR0VmxExitPause(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9302{
9303 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9304
9305 /* By default, we don't enable VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT. */
9306 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPause);
9307 if (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT)
9308 return VERR_EM_INTERPRETER;
9309 AssertMsgFailed(("Unexpected PAUSE exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
9310 return VERR_VMX_UNEXPECTED_EXIT_CODE;
9311}
9312
9313
9314/**
9315 * VM-exit handler for when the TPR value is lowered below the specified
9316 * threshold (VMX_EXIT_TPR_BELOW_THRESHOLD). Conditional VM-exit.
9317 */
9318HMVMX_EXIT_DECL hmR0VmxExitTprBelowThreshold(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9319{
9320 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9321 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW);
9322
9323 /*
9324 * The TPR has already been updated, see hmR0VmxPostRunGuest(). RIP is also updated as part of the VM-exit by VT-x. Update
9325 * the threshold in the VMCS, deliver the pending interrupt via hmR0VmxPreRunGuest()->hmR0VmxInjectEvent() and
9326 * resume guest execution.
9327 */
9328 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
9329 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTprBelowThreshold);
9330 return VINF_SUCCESS;
9331}
9332
9333
9334/**
9335 * VM-exit handler for control-register accesses (VMX_EXIT_MOV_CRX). Conditional
9336 * VM-exit.
9337 *
9338 * @retval VINF_SUCCESS when guest execution can continue.
9339 * @retval VINF_PGM_CHANGE_MODE when shadow paging mode changed, back to ring-3.
9340 * @retval VINF_PGM_SYNC_CR3 CR3 sync is required, back to ring-3.
9341 * @retval VERR_EM_INTERPRETER when something unexpected happened, fallback to
9342 * recompiler.
9343 */
9344HMVMX_EXIT_DECL hmR0VmxExitMovCRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9345{
9346 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9347 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitMovCRx, y2);
9348 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9349 AssertRCReturn(rc, rc);
9350
9351 const RTGCUINTPTR uExitQualification = pVmxTransient->uExitQualification;
9352 const uint32_t uAccessType = VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQualification);
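/* See Intel spec. Table 27-3 "Exit Qualification for Control-Register Accesses" for the encoding. */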
9353 PVM pVM = pVCpu->CTX_SUFF(pVM);
9354 switch (uAccessType)
9355 {
9356 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE: /* MOV to CRx */
9357 {
9358#if 0
9359 /* EMInterpretCRxWrite() references a lot of guest state (EFER, RFLAGS, Segment Registers, etc.) Sync entire state */
9360 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9361#else
9362 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
9363 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
9364 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9365#endif
9366 AssertRCReturn(rc, rc);
9367
9368 rc = EMInterpretCRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
9369 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification),
9370 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification));
9371 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_PGM_SYNC_CR3);
9372
9373 switch (VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification))
9374 {
9375 case 0: /* CR0 */
9376 Log4(("CRX CR0 write rc=%d CR0=%#RX64\n", rc, pMixedCtx->cr0));
9377 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
9378 break;
9379 case 2: /* CR2 */
9380 /* Nothing to do here; CR2 is not part of the VMCS. */
9381 break;
9382 case 3: /* CR3 */
9383 Assert(!pVM->hm.s.fNestedPaging || !CPUMIsGuestPagingEnabledEx(pMixedCtx));
9384 Log4(("CRX CR3 write rc=%d CR3=%#RX64\n", rc, pMixedCtx->cr3));
9385 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR3;
9386 break;
9387 case 4: /* CR4 */
9388 Log4(("CRX CR4 write rc=%d CR4=%#RX64\n", rc, pMixedCtx->cr4));
9389 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR4;
9390 break;
9391 case 8: /* CR8 */
9392 Assert(!(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
9393 /* CR8 contains the APIC TPR. Was updated by EMInterpretCRxWrite(). */
9394 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_VMX_GUEST_APIC_STATE;
9395 break;
9396 default:
9397 AssertMsgFailed(("Invalid CRx register %#x\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)));
9398 break;
9399 }
9400
9401 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxWrite[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
9402 break;
9403 }
9404
9405 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ: /* MOV from CRx */
9406 {
9407 /* EMInterpretCRxRead() requires EFER MSR, CS. */
9408 rc = hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9409 AssertRCReturn(rc, rc);
9410 Assert( !pVM->hm.s.fNestedPaging
9411 || !CPUMIsGuestPagingEnabledEx(pMixedCtx)
9412 || VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 3);
9413
9414 /* CR8 reads only cause a VM-exit when the TPR shadow feature isn't enabled. */
9415 Assert( VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification) != 8
9416 || !(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW));
9417
9418 rc = EMInterpretCRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
9419 VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQualification),
9420 VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification));
9421 Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
9422 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCRxRead[VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification)]);
9423 Log4(("CRX CR%d Read access rc=%d\n", VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQualification), rc));
9424 break;
9425 }
9426
9427 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS: /* CLTS (Clear Task-Switch Flag in CR0) */
9428 {
9429 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
9430 AssertRCReturn(rc, rc);
9431 rc = EMInterpretCLTS(pVM, pVCpu);
9432 AssertRCReturn(rc, rc);
9433 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
9434 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitClts);
9435 Log4(("CRX CLTS write rc=%d\n", rc));
9436 break;
9437 }
9438
9439 case VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW: /* LMSW (Load Machine-Status Word into CR0) */
9440 {
9441 rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
9442 AssertRCReturn(rc, rc);
9443 rc = EMInterpretLMSW(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(uExitQualification));
9444 if (RT_LIKELY(rc == VINF_SUCCESS))
9445 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
9446 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitLmsw);
9447 Log4(("CRX LMSW write rc=%d\n", rc));
9448 break;
9449 }
9450
9451 default:
9452 {
9453 AssertMsgFailed(("Invalid access-type in Mov CRx exit qualification %#x\n", uAccessType));
9454 rc = VERR_VMX_UNEXPECTED_EXCEPTION;
9455 }
9456 }
9457
9458 /* Validate possible error codes. */
9459 Assert(rc == VINF_SUCCESS || rc == VINF_PGM_CHANGE_MODE || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_SYNC_CR3
9460 || rc == VERR_VMX_UNEXPECTED_EXCEPTION);
9461 if (RT_SUCCESS(rc))
9462 {
9463 int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
9464 AssertRCReturn(rc2, rc2);
9465 }
9466
9467 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitMovCRx, y2);
9468 return rc;
9469}
9470
9471
9472/**
9473 * VM-exit handler for I/O instructions (VMX_EXIT_IO_INSTR). Conditional
9474 * VM-exit.
9475 */
9476HMVMX_EXIT_DECL hmR0VmxExitIoInstr(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9477{
9478 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9479 STAM_PROFILE_ADV_START(&pVCpu->hm.s.StatExitIO, y1);
9480
9481 int rc2 = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9482 rc2 |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
9483 rc2 |= hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);
9484 rc2 |= hmR0VmxSaveGuestRflags(pVCpu, pMixedCtx); /* Eflag checks in EMInterpretDisasCurrent(). */
9485 rc2 |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx); /* CR0 checks & PGM* in EMInterpretDisasCurrent(). */
9486 rc2 |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx); /* SELM checks in EMInterpretDisasCurrent(). */
9487 /* EFER is also required for long-mode checks in EMInterpretDisasCurrent(), but it's always up-to-date. */
9488 AssertRCReturn(rc2, rc2);
9489
9490 /* See Intel spec. Table 27-5 "Exit Qualifications for I/O Instructions" for the format. */
9491 uint32_t uIOPort = VMX_EXIT_QUALIFICATION_IO_PORT(pVmxTransient->uExitQualification);
9492 uint8_t uIOWidth = VMX_EXIT_QUALIFICATION_IO_WIDTH(pVmxTransient->uExitQualification);
9493 bool fIOWrite = ( VMX_EXIT_QUALIFICATION_IO_DIRECTION(pVmxTransient->uExitQualification)
9494 == VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT);
9495 bool fIOString = VMX_EXIT_QUALIFICATION_IO_IS_STRING(pVmxTransient->uExitQualification);
9496 AssertReturn(uIOWidth <= 3 && uIOWidth != 2, VERR_HMVMX_IPE_1);
9497
9498 /* I/O operation lookup arrays. */
9499 static const uint32_t s_aIOSizes[4] = { 1, 2, 0, 4 }; /* Size of the I/O accesses. */
9500 static const uint32_t s_aIOOpAnd[4] = { 0xff, 0xffff, 0, 0xffffffff }; /* AND masks for saving the result (in AL/AX/EAX). */
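/* The exit qualification encodes the access size as 0 (1 byte), 1 (2 bytes) or 3 (4 bytes); the value 2 is
   undefined, hence the zero placeholders at index 2 and the AssertReturn above. */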
9501
9502 VBOXSTRICTRC rcStrict;
9503 const uint32_t cbValue = s_aIOSizes[uIOWidth];
9504 const uint32_t cbInstr = pVmxTransient->cbInstr;
9505 bool fUpdateRipAlready = false; /* ugly hack, should be temporary. */
9506 PVM pVM = pVCpu->CTX_SUFF(pVM);
9507 if (fIOString)
9508 {
9509 /*
9510 * INS/OUTS - I/O String instruction.
9511 *
9512 * Use instruction-information if available, otherwise fall back on
9513 * interpreting the instruction.
9514 */
9515 Log4(("CS:RIP=%04x:%#RX64 %#06x/%u %c str\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
9516#if 0 /* Not quite ready; the iSegReg assertion seems to trigger once... Do we perhaps need to always read that in the longjmp / preempt scenario? */
9517 AssertReturn(pMixedCtx->dx == uIOPort, VERR_HMVMX_IPE_2);
9518 if (MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(pVM->hm.s.vmx.msr.vmx_basic_info))
9519 {
9520 rc2 = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
9521 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
9522 rc2 |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9523 AssertRCReturn(rc2, rc2);
9524 AssertReturn(pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize <= 2, VERR_HMVMX_IPE_3);
9525 AssertCompile(IEMMODE_16BIT == 0 && IEMMODE_32BIT == 1 && IEMMODE_64BIT == 2);
9526 IEMMODE enmAddrMode = (IEMMODE)pVmxTransient->ExitInstrInfo.StrIo.u3AddrSize;
9527 bool fRep = VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification);
9528 if (fIOWrite)
9529 {
9530 rcStrict = IEMExecStringIoWrite(pVCpu, cbValue, enmAddrMode, fRep, cbInstr,
9531 pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
9532 //if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
9533 // hmR0SavePendingIOPortWriteStr(pVCpu, pMixedCtx->rip, cbValue, enmAddrMode, fRep, cbInstr,
9534 // pVmxTransient->ExitInstrInfo.StrIo.iSegReg);
9535 }
9536 else
9537 {
9538 AssertMsgReturn(pVmxTransient->ExitInstrInfo.StrIo.iSegReg == X86_SREG_ES,
9539 ("%#x (%#llx)\n", pVmxTransient->ExitInstrInfo.StrIo.iSegReg, pVmxTransient->ExitInstrInfo.u),
9540 VERR_HMVMX_IPE_4);
9541 rcStrict = IEMExecStringIoRead(pVCpu, cbValue, enmAddrMode, fRep, cbInstr);
9542 //if (rcStrict == VINF_IOM_R3_IOPORT_READ)
9543 // hmR0SavePendingIOPortReadStr(pVCpu, pMixedCtx->rip, cbValue, enmAddrMode, fRep, cbInstr);
9544 }
9545 }
9546 else
9547 {
9548 /** @todo optimize this, IEM should request the additional state if it needs it (GP, PF, ++). */
9549 rc2 = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9550 AssertRCReturn(rc2, rc2);
9551 rcStrict = IEMExecOne(pVCpu);
9552 }
9553 /** @todo IEM needs to be setting these flags somehow. */
9554 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
9555 fUpdateRipAlready = true;
9556#else
9557 PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
9558 rcStrict = EMInterpretDisasCurrent(pVM, pVCpu, pDis, NULL);
9559 if (RT_SUCCESS(rcStrict))
9560 {
9561 if (fIOWrite)
9562 {
9563 rcStrict = IOMInterpretOUTSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
9564 (DISCPUMODE)pDis->uAddrMode, cbValue);
9565 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringWrite);
9566 }
9567 else
9568 {
9569 rcStrict = IOMInterpretINSEx(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx), uIOPort, pDis->fPrefix,
9570 (DISCPUMODE)pDis->uAddrMode, cbValue);
9571 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOStringRead);
9572 }
9573 }
9574 else
9575 {
9576 AssertMsg(rcStrict == VERR_EM_INTERPRETER, ("rcStrict=%Rrc RIP %#RX64\n", VBOXSTRICTRC_VAL(rcStrict), pMixedCtx->rip));
9577 rcStrict = VINF_EM_RAW_EMULATE_INSTR;
9578 }
9579#endif
9580 }
9581 else
9582 {
9583 /*
9584 * IN/OUT - I/O instruction.
9585 */
9586 Log4(("CS:RIP=%04x:%#RX64 %#06x/%u %c\n", pMixedCtx->cs.Sel, pMixedCtx->rip, uIOPort, cbValue, fIOWrite ? 'w' : 'r'));
9587 const uint32_t uAndVal = s_aIOOpAnd[uIOWidth];
9588 Assert(!VMX_EXIT_QUALIFICATION_IO_IS_REP(pVmxTransient->uExitQualification));
9589 if (fIOWrite)
9590 {
9591 rcStrict = IOMIOPortWrite(pVM, pVCpu, uIOPort, pMixedCtx->eax & uAndVal, cbValue);
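/* A VINF_IOM_R3_IOPORT_WRITE status means ring-3 must complete the write; stash the details so it can be replayed there. */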
9592 if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
9593 HMR0SavePendingIOPortWrite(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
9594 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIOWrite);
9595 }
9596 else
9597 {
9598 uint32_t u32Result = 0;
9599 rcStrict = IOMIOPortRead(pVM, pVCpu, uIOPort, &u32Result, cbValue);
9600 if (IOM_SUCCESS(rcStrict))
9601 {
9602 /* Save result of I/O IN instr. in AL/AX/EAX. */
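/* The AND mask preserves the untouched upper bits of EAX for 8-bit and 16-bit reads. */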
9603 pMixedCtx->eax = (pMixedCtx->eax & ~uAndVal) | (u32Result & uAndVal);
9604 }
9605 else if (rcStrict == VINF_IOM_R3_IOPORT_READ)
9606 HMR0SavePendingIOPortRead(pVCpu, pMixedCtx->rip, pMixedCtx->rip + cbInstr, uIOPort, uAndVal, cbValue);
9607 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIORead);
9608 }
9609 }
9610
9611 if (IOM_SUCCESS(rcStrict))
9612 {
9613 if (!fUpdateRipAlready)
9614 {
9615 pMixedCtx->rip += cbInstr;
9616 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
9617 }
9618
9619 /*
9620 * If any I/O breakpoints are armed, we need to check if one triggered
9621 * and take appropriate action.
9622 * Note that the I/O breakpoint type is undefined if CR4.DE is 0.
9623 */
9624 rc2 = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
9625 AssertRCReturn(rc2, rc2);
9626
9627 /** @todo Optimize away the DBGFBpIsHwIoArmed call by having DBGF tell the
9628 * execution engines about whether hyper BPs and such are pending. */
9629 uint32_t const uDr7 = pMixedCtx->dr[7];
9630 if (RT_UNLIKELY( ( (uDr7 & X86_DR7_ENABLED_MASK)
9631 && X86_DR7_ANY_RW_IO(uDr7)
9632 && (pMixedCtx->cr4 & X86_CR4_DE))
9633 || DBGFBpIsHwIoArmed(pVM)))
9634 {
9635 STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxIoCheck);
9636
9637 /* We're playing with the host CPU state here, make sure we don't preempt. */
9638 HM_DISABLE_PREEMPT_IF_NEEDED();
9639 bool fIsGuestDbgActive = CPUMR0DebugStateMaybeSaveGuest(pVCpu, true /*fDr6*/);
9640
9641 VBOXSTRICTRC rcStrict2 = DBGFBpCheckIo(pVM, pVCpu, pMixedCtx, uIOPort, cbValue);
9642 if (rcStrict2 == VINF_EM_RAW_GUEST_TRAP)
9643 {
9644 /* Raise #DB. */
9645 if (fIsGuestDbgActive)
9646 ASMSetDR6(pMixedCtx->dr[6]);
9647 if (pMixedCtx->dr[7] != uDr7)
9648 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
9649
9650 hmR0VmxSetPendingXcptDB(pVCpu, pMixedCtx);
9651 }
9652 /* rcStrict is VINF_SUCCESS or in [VINF_EM_FIRST..VINF_EM_LAST]. */
9653 else if ( rcStrict2 != VINF_SUCCESS
9654 && (rcStrict == VINF_SUCCESS || rcStrict2 < rcStrict))
9655 rcStrict = rcStrict2;
9656
9657 HM_RESTORE_PREEMPT_IF_NEEDED();
9658 }
9659 }
9660
9661#ifdef DEBUG
9662 if (rcStrict == VINF_IOM_R3_IOPORT_READ)
9663 Assert(!fIOWrite);
9664 else if (rcStrict == VINF_IOM_R3_IOPORT_WRITE)
9665 Assert(fIOWrite);
9666 else
9667 {
9668 /** @todo r=bird: This is missing a bunch of VINF_EM_FIRST..VINF_EM_LAST
9669 * statuses, that the VMM device and some others may return. See
9670 * IOM_SUCCESS() for guidance. */
9671 AssertMsg( RT_FAILURE(rcStrict)
9672 || rcStrict == VINF_SUCCESS
9673 || rcStrict == VINF_EM_RAW_EMULATE_INSTR
9674 || rcStrict == VINF_EM_DBG_BREAKPOINT
9675 || rcStrict == VINF_EM_RAW_GUEST_TRAP
9676 || rcStrict == VINF_TRPM_XCPT_DISPATCHED, ("%Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
9677 }
9678#endif
9679
9680 STAM_PROFILE_ADV_STOP(&pVCpu->hm.s.StatExitIO, y1);
9681 return VBOXSTRICTRC_TODO(rcStrict);
9682}
9683
9684
9685/**
9686 * VM-exit handler for task switches (VMX_EXIT_TASK_SWITCH). Unconditional
9687 * VM-exit.
9688 */
9689HMVMX_EXIT_DECL hmR0VmxExitTaskSwitch(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9690{
9691 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9692
9693 /* Check if this task-switch occurred while delivering an event through the guest IDT. */
9694 int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9695 AssertRCReturn(rc, rc);
9696 if (VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT)
9697 {
9698 rc = hmR0VmxReadIdtVectoringInfoVmcs(pVmxTransient);
9699 AssertRCReturn(rc, rc);
9700 if (VMX_IDT_VECTORING_INFO_VALID(pVmxTransient->uIdtVectoringInfo))
9701 {
9702 uint32_t uIntType = VMX_IDT_VECTORING_INFO_TYPE(pVmxTransient->uIdtVectoringInfo);
9703
9704 /* Software interrupts and exceptions will be regenerated when the recompiler restarts the instruction. */
9705 if ( uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_INT
9706 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT
9707 && uIntType != VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT)
9708 {
9709 uint32_t uVector = VMX_IDT_VECTORING_INFO_VECTOR(pVmxTransient->uIdtVectoringInfo);
9710 bool fErrorCodeValid = !!VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(pVmxTransient->uIdtVectoringInfo);
9711
9712 /* Save it as a pending event and it'll be converted to a TRPM event on the way out to ring-3. */
9713 Assert(!pVCpu->hm.s.Event.fPending);
9714 pVCpu->hm.s.Event.fPending = true;
9715 pVCpu->hm.s.Event.u64IntrInfo = pVmxTransient->uIdtVectoringInfo;
9716 rc = hmR0VmxReadIdtVectoringErrorCodeVmcs(pVmxTransient);
9717 AssertRCReturn(rc, rc);
9718 if (fErrorCodeValid)
9719 pVCpu->hm.s.Event.u32ErrCode = pVmxTransient->uIdtVectoringErrorCode;
9720 else
9721 pVCpu->hm.s.Event.u32ErrCode = 0;
9722 if ( uIntType == VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT
9723 && uVector == X86_XCPT_PF)
9724 {
9725 pVCpu->hm.s.Event.GCPtrFaultAddress = pMixedCtx->cr2;
9726 }
9727
9728 Log4(("Pending event on TaskSwitch uIntType=%#x uVector=%#x\n", uIntType, uVector));
9729 }
9730 }
9731 }
9732
9733 /** @todo Emulate task switch someday, currently just going back to ring-3 for
9734 * emulation. */
9735 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitTaskSwitch);
9736 return VERR_EM_INTERPRETER;
9737}
9738
9739
9740/**
9741 * VM-exit handler for monitor-trap-flag (VMX_EXIT_MTF). Conditional VM-exit.
9742 */
9743HMVMX_EXIT_DECL hmR0VmxExitMtf(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9744{
9745 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
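/* The monitor-trap flag was armed for single-stepping the guest; disarm it and hand control to the debugger. */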
9746 Assert(pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG);
9747 pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG;
9748 int rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
9749 AssertRCReturn(rc, rc);
9750 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitMtf);
9751 return VINF_EM_DBG_STEPPED;
9752}
9753
9754
9755/**
9756 * VM-exit handler for APIC access (VMX_EXIT_APIC_ACCESS). Conditional VM-exit.
9757 */
9758HMVMX_EXIT_DECL hmR0VmxExitApicAccess(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9759{
9760 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9761
9762 /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
9763 int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
9764 if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
9765 return VINF_SUCCESS;
9766 else if (RT_UNLIKELY(rc == VINF_EM_RESET))
9767 return rc;
9768
9769#if 0
9770 /** @todo Investigate if IOMMMIOPhysHandler() requires a lot of state, for now
9771 * just sync the whole thing. */
9772 rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
9773#else
9774 /* Aggressive state sync. for now. */
9775 rc = hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
9776 rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
9777 rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
9778#endif
9779 rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
9780 AssertRCReturn(rc, rc);
9781
9782 /* See Intel spec. Table 27-6 "Exit Qualifications for APIC-access VM-exits from Linear Accesses & Guest-Physical Addresses". */
9783 uint32_t uAccessType = VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(pVmxTransient->uExitQualification);
9784 switch (uAccessType)
9785 {
9786 case VMX_APIC_ACCESS_TYPE_LINEAR_WRITE:
9787 case VMX_APIC_ACCESS_TYPE_LINEAR_READ:
9788 {
9789 if ( (pVCpu->hm.s.vmx.u32ProcCtls & VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW)
9790 && VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification) == 0x80)
9791 {
9792 AssertMsgFailed(("hmR0VmxExitApicAccess: can't access TPR offset while using TPR shadowing.\n"));
9793 }
9794
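/* Derive the guest-physical address of the accessed APIC register from the APIC base page and the access offset. */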
9795 RTGCPHYS GCPhys = pMixedCtx->msrApicBase; /* Always up-to-date, msrApicBase is not part of the VMCS. */
9796 GCPhys &= PAGE_BASE_GC_MASK;
9797 GCPhys += VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification);
9798 PVM pVM = pVCpu->CTX_SUFF(pVM);
9799 Log4(("ApicAccess uAccessType=%#x GCPhys=%#RGp Off=%#x\n", uAccessType, GCPhys,
9800 VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(pVmxTransient->uExitQualification)));
9801
9802 VBOXSTRICTRC rc2 = IOMMMIOPhysHandler(pVM, pVCpu,
9803 (uAccessType == VMX_APIC_ACCESS_TYPE_LINEAR_READ) ? 0 : X86_TRAP_PF_RW,
9804 CPUMCTX2CORE(pMixedCtx), GCPhys);
9805 rc = VBOXSTRICTRC_VAL(rc2);
9806 Log4(("ApicAccess rc=%d\n", rc));
9807 if ( rc == VINF_SUCCESS
9808 || rc == VERR_PAGE_TABLE_NOT_PRESENT
9809 || rc == VERR_PAGE_NOT_PRESENT)
9810 {
9811 pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
9812 | HM_CHANGED_VMX_GUEST_APIC_STATE;
9813 rc = VINF_SUCCESS;
9814 }
9815 break;
9816 }
9817
9818 default:
9819 Log4(("ApicAccess uAccessType=%#x\n", uAccessType));
9820 rc = VINF_EM_RAW_EMULATE_INSTR;
9821 break;
9822 }
9823
9824 STAM_COUNTER_INC(&pVCpu->hm.s.StatExitApicAccess);
9825 return rc;
9826}
9827
9828
9829/**
9830 * VM-exit handler for debug-register accesses (VMX_EXIT_MOV_DRX). Conditional
9831 * VM-exit.
9832 */
9833HMVMX_EXIT_DECL hmR0VmxExitMovDRx(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
9834{
9835 HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
9836
9837 /* We should -not- get this VM-exit if the guest is debugging. */
9838 if (CPUMIsGuestDebugStateActive(pVCpu))
9839 {
9840 AssertMsgFailed(("Unexpected MOV DRx exit. pVCpu=%p pMixedCtx=%p\n", pVCpu, pMixedCtx));
9841 return VERR_VMX_UNEXPECTED_EXIT_CODE;
9842 }
9843
9844 int rc = VERR_INTERNAL_ERROR_5;
    if (   !DBGFIsStepping(pVCpu)
        && !pVCpu->hm.s.fSingleInstruction
        && !CPUMIsHyperDebugStateActive(pVCpu))
    {
        /* Don't intercept MOV DRx and #DB any more. */
        pVCpu->hm.s.vmx.u32ProcCtls &= ~VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT;
        rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_PROC_EXEC, pVCpu->hm.s.vmx.u32ProcCtls);
        AssertRCReturn(rc, rc);

        if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
        {
#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
            pVCpu->hm.s.vmx.u32XcptBitmap &= ~RT_BIT(X86_XCPT_DB);
            rc = VMXWriteVmcs32(VMX_VMCS32_CTRL_EXCEPTION_BITMAP, pVCpu->hm.s.vmx.u32XcptBitmap);
            AssertRCReturn(rc, rc);
#endif
        }

        /* We're playing with the host CPU state here, make sure we can't preempt. */
        HM_DISABLE_PREEMPT_IF_NEEDED();

        /* Save the host & load the guest debug state, restart execution of the MOV DRx instruction. */
        PVM pVM = pVCpu->CTX_SUFF(pVM);
        CPUMR0LoadGuestDebugState(pVCpu, true /* include DR6 */);
        Assert(CPUMIsGuestDebugStateActive(pVCpu) || HC_ARCH_BITS == 32);

        HM_RESTORE_PREEMPT_IF_NEEDED();

#ifdef VBOX_WITH_STATISTICS
        rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
        AssertRCReturn(rc, rc);
        if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
        else
            STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
#endif
        STAM_COUNTER_INC(&pVCpu->hm.s.StatDRxContextSwitch);
        return VINF_SUCCESS;
    }

    /*
     * EMInterpretDRx[Write|Read]() calls CPUMIsGuestIn64BitCode(), which requires EFER and CS.
     * EFER is always up-to-date (see hmR0VmxSaveGuestAutoLoadStoreMsrs()), so we only need to
     * update the segment registers from the CPU.
     */
    rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
    AssertRCReturn(rc, rc);
    Log4(("CS:RIP=%04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));

    PVM pVM = pVCpu->CTX_SUFF(pVM);
    if (VMX_EXIT_QUALIFICATION_DRX_DIRECTION(pVmxTransient->uExitQualification) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE)
    {
        rc = EMInterpretDRxWrite(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
                                 VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification),
                                 VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification));
        if (RT_SUCCESS(rc))
            pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_DEBUG;
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxWrite);
    }
    else
    {
        rc = EMInterpretDRxRead(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx),
                                VMX_EXIT_QUALIFICATION_DRX_GENREG(pVmxTransient->uExitQualification),
                                VMX_EXIT_QUALIFICATION_DRX_REGISTER(pVmxTransient->uExitQualification));
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitDRxRead);
    }

    Assert(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER);
    if (RT_SUCCESS(rc))
    {
        int rc2 = hmR0VmxAdvanceGuestRip(pVCpu, pMixedCtx, pVmxTransient);
        AssertRCReturn(rc2, rc2);
    }
    return rc;
}


/**
 * VM-exit handler for EPT misconfiguration (VMX_EXIT_EPT_MISCONFIG).
 * Conditional VM-exit.
 */
HMVMX_EXIT_DECL hmR0VmxExitEptMisconfig(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);

    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
    int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
    if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
        return VINF_SUCCESS;
    else if (RT_UNLIKELY(rc == VINF_EM_RESET))
        return rc;

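    /*
     * EPT misconfigurations are how MMIO accesses surface with nested paging: MMIO
     * pages get deliberately misconfigured EPT entries so that touching them traps
     * here rather than raising an ordinary EPT violation, and PGM emulates the access.
     */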
    RTGCPHYS GCPhys = 0;
    rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);

#if 0
    rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);      /** @todo Can we do better? */
#else
    /* Aggressive state sync. for now. */
    rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
    rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
#endif
    AssertRCReturn(rc, rc);

    /*
     * If we succeed, resume guest execution.
     * If we fail in interpreting the instruction because we couldn't get the guest physical address
     * of the page containing the instruction via the guest's page tables (we would invalidate the
     * guest page in the host TLB), we still resume execution; the resulting guest page fault lets
     * the guest handle this weird case itself. See @bugref{6043}.
     */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    VBOXSTRICTRC rc2 = PGMR0Trap0eHandlerNPMisconfig(pVM, pVCpu, PGMMODE_EPT, CPUMCTX2CORE(pMixedCtx), GCPhys, UINT32_MAX);
    rc = VBOXSTRICTRC_VAL(rc2);
    Log4(("EPT misconfig at %#RGp RIP=%#RX64 rc=%d\n", GCPhys, pMixedCtx->rip, rc));
    if (   rc == VINF_SUCCESS
        || rc == VERR_PAGE_TABLE_NOT_PRESENT
        || rc == VERR_PAGE_NOT_PRESENT)
    {
        /* Successfully handled MMIO operation. */
        pVCpu->hm.s.fContextUseFlags |=   HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
                                        | HM_CHANGED_VMX_GUEST_APIC_STATE;
        rc = VINF_SUCCESS;
    }
    return rc;
}


/**
 * VM-exit handler for EPT violation (VMX_EXIT_EPT_VIOLATION). Conditional
 * VM-exit.
 */
HMVMX_EXIT_DECL hmR0VmxExitEptViolation(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_HANDLER_PARAMS();
    Assert(pVCpu->CTX_SUFF(pVM)->hm.s.fNestedPaging);

    /* If this VM-exit occurred while delivering an event through the guest IDT, handle it accordingly. */
    int rc = hmR0VmxCheckExitDueToEventDelivery(pVCpu, pMixedCtx, pVmxTransient);
    if (RT_UNLIKELY(rc == VINF_HM_DOUBLE_FAULT))
        return VINF_SUCCESS;
    else if (RT_UNLIKELY(rc == VINF_EM_RESET))
        return rc;

    RTGCPHYS GCPhys = 0;
    rc = VMXReadVmcs64(VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL, &GCPhys);
    rc |= hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
#if 0
    rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);      /** @todo Can we do better? */
#else
    /* Aggressive state sync. for now. */
    rc |= hmR0VmxSaveGuestRipRspRflags(pVCpu, pMixedCtx);
    rc |= hmR0VmxSaveGuestControlRegs(pVCpu, pMixedCtx);
    rc |= hmR0VmxSaveGuestSegmentRegs(pVCpu, pMixedCtx);
#endif
    AssertRCReturn(rc, rc);

    /* Intel spec. Table 27-7 "Exit Qualifications for EPT violations". */
    AssertMsg(((pVmxTransient->uExitQualification >> 7) & 3) != 2, ("%#RX64", pVmxTransient->uExitQualification));

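    /* Translate the EPT-violation qualification bits into #PF-style error-code bits
       so the fault can be fed to the regular nested-paging handler below. */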
    RTGCUINT uErrorCode = 0;
    if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH)
        uErrorCode |= X86_TRAP_PF_ID;
    if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
        uErrorCode |= X86_TRAP_PF_RW;
    if (pVmxTransient->uExitQualification & VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT)
        uErrorCode |= X86_TRAP_PF_P;

    TRPMAssertXcptPF(pVCpu, GCPhys, uErrorCode);

    Log4(("EPT violation %#x at %#RX64 ErrorCode %#x CS:EIP=%04x:%#RX64\n", pVmxTransient->uExitQualification, GCPhys,
          uErrorCode, pMixedCtx->cs.Sel, pMixedCtx->rip));

    /* Handle the pagefault trap for the nested shadow table. */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    rc = PGMR0Trap0eHandlerNestedPaging(pVM, pVCpu, PGMMODE_EPT, uErrorCode, CPUMCTX2CORE(pMixedCtx), GCPhys);
    TRPMResetTrap(pVCpu);

    /* Same case as PGMR0Trap0eHandlerNPMisconfig(). See comment above, @bugref{6043}. */
    if (   rc == VINF_SUCCESS
        || rc == VERR_PAGE_TABLE_NOT_PRESENT
        || rc == VERR_PAGE_NOT_PRESENT)
    {
        /* Successfully synced our nested page tables. */
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitReasonNpf);
        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
        return VINF_SUCCESS;
    }

    Log4(("EPT return to ring-3 rc=%d\n", rc));
    return rc;
}

/** @} */

/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */
/* -=-=-=-=-=-=-=-=-=- VM-exit Exception Handlers -=-=-=-=-=-=-=-=-=-=- */
/* -=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-= */

/** @name VM-exit exception handlers.
 * @{
 */

/**
 * VM-exit exception handler for #MF (Math Fault: floating point exception).
 */
static int hmR0VmxExitXcptMF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestMF);

    int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    AssertRCReturn(rc, rc);

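    /* With CR0.NE clear, FPU errors are reported the legacy way (via FERR# / IRQ 13)
       instead of raising #MF directly; emulating that needs extra work, so we punt
       to the interpreter. */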
    if (!(pMixedCtx->cr0 & X86_CR0_NE))
    {
        /* Old-style FPU error reporting needs some extra work. */
        /** @todo don't fall back to the recompiler, but do it manually. */
        return VERR_EM_INTERPRETER;
    }

    hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
                           pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
    return rc;
}


/**
 * VM-exit exception handler for #BP (Breakpoint exception).
 */
static int hmR0VmxExitXcptBP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestBP);

    /** @todo Try optimize this by not saving the entire guest state unless
     *        really needed. */
    int rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    AssertRCReturn(rc, rc);

    PVM pVM = pVCpu->CTX_SUFF(pVM);
    rc = DBGFRZTrap03Handler(pVM, pVCpu, CPUMCTX2CORE(pMixedCtx));
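    /* VINF_EM_RAW_GUEST_TRAP means the breakpoint isn't one of DBGF's, so re-inject
       the #BP into the guest; otherwise the debugger takes over. */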
    if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
        rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
        rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
        AssertRCReturn(rc, rc);

        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
                               pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
    }

    Assert(rc == VINF_SUCCESS || rc == VINF_EM_RAW_GUEST_TRAP || rc == VINF_EM_DBG_BREAKPOINT);
    return rc;
}


/**
 * VM-exit exception handler for #DB (Debug exception).
 */
static int hmR0VmxExitXcptDB(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestDB);
    Log6(("XcptDB\n"));

    /*
     * Get the DR6-like values from the exit qualification and pass them to DBGF
     * for processing.
     */
    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    AssertRCReturn(rc, rc);

    /* See Intel spec. Table 27-1 "Exit Qualifications for debug exceptions" for the format. */
    uint64_t uDR6 = X86_DR6_INIT_VAL;
    uDR6 |= (  pVmxTransient->uExitQualification
             & (X86_DR6_B0 | X86_DR6_B1 | X86_DR6_B2 | X86_DR6_B3 | X86_DR6_BD | X86_DR6_BS));

    rc = DBGFRZTrap01Handler(pVCpu->CTX_SUFF(pVM), pVCpu, CPUMCTX2CORE(pMixedCtx), uDR6, pVCpu->hm.s.fSingleInstruction);
    if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        /*
         * The exception was for the guest. Update DR6, DR7.GD and
         * IA32_DEBUGCTL.LBR before forwarding it.
         * (See Intel spec. 27.1 "Architectural State before a VM-Exit".)
         */
        HM_DISABLE_PREEMPT_IF_NEEDED();

        pMixedCtx->dr[6] &= ~X86_DR6_B_MASK;
        pMixedCtx->dr[6] |= uDR6;
        if (CPUMIsGuestDebugStateActive(pVCpu))
            ASMSetDR6(pMixedCtx->dr[6]);

        HM_RESTORE_PREEMPT_IF_NEEDED();

        rc = hmR0VmxSaveGuestDR7(pVCpu, pMixedCtx);
        AssertRCReturn(rc, rc);

        /* X86_DR7_GD will be cleared if DRx accesses should be trapped inside the guest. */
        pMixedCtx->dr[7] &= ~X86_DR7_GD;

        /* Paranoia. */
        pMixedCtx->dr[7] &= ~X86_DR7_RAZ_MASK;
        pMixedCtx->dr[7] |= X86_DR7_RA1_MASK;

        rc = VMXWriteVmcs32(VMX_VMCS_GUEST_DR7, (uint32_t)pMixedCtx->dr[7]);
        AssertRCReturn(rc, rc);

        /*
         * Raise #DB in the guest.
         */
        rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
        rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
        rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
        AssertRCReturn(rc, rc);
        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
                               pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
        return VINF_SUCCESS;
    }

    /*
     * Not a guest trap, must be a hypervisor related debug event then.
     * Update DR6 in case someone is interested in it.
     */
    AssertMsg(rc == VINF_EM_DBG_STEPPED || rc == VINF_EM_DBG_BREAKPOINT, ("%Rrc\n", rc));
    AssertReturn(CPUMIsHyperDebugStateActive(pVCpu), VERR_HM_IPE_5);
    CPUMSetHyperDR6(pVCpu, uDR6);

    return rc;
}


/**
 * VM-exit exception handler for #NM (Device-not-available exception: floating
 * point exception).
 */
static int hmR0VmxExitXcptNM(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();

#ifndef HMVMX_ALWAYS_TRAP_ALL_XCPTS
    Assert(!CPUMIsGuestFPUStateActive(pVCpu));
#endif

    /* We require CR0 and EFER. EFER is always up-to-date. */
    int rc = hmR0VmxSaveGuestCR0(pVCpu, pMixedCtx);
    AssertRCReturn(rc, rc);

    /* We're playing with the host CPU state here, have to disable preemption. */
    HM_DISABLE_PREEMPT_IF_NEEDED();

    /* Lazy FPU loading; load the guest-FPU state transparently and continue execution of the guest. */
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    rc = CPUMR0LoadGuestFPU(pVM, pVCpu, pMixedCtx);
    if (rc == VINF_SUCCESS)
    {
        Assert(CPUMIsGuestFPUStateActive(pVCpu));
        HM_RESTORE_PREEMPT_IF_NEEDED();

        pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_CR0;
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowNM);
        return VINF_SUCCESS;
    }

    HM_RESTORE_PREEMPT_IF_NEEDED();

    /* Forward #NM to the guest. */
    Assert(rc == VINF_EM_RAW_GUEST_TRAP);
    rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
    AssertRCReturn(rc, rc);
    hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
                           pVmxTransient->cbInstr, 0 /* error code */, 0 /* GCPtrFaultAddress */);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestNM);
    return rc;
}


/**
 * VM-exit exception handler for #GP (General-protection exception).
 *
 * @remarks Requires pVmxTransient->uExitIntrInfo to be up-to-date.
 */
static int hmR0VmxExitXcptGP(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestGP);

    int rc = VERR_INTERNAL_ERROR_5;
    if (!pVCpu->hm.s.vmx.RealMode.fRealOnV86Active)
    {
#ifdef HMVMX_ALWAYS_TRAP_ALL_XCPTS
        /* If the guest is not in real-mode or we have unrestricted execution support, reflect #GP to the guest. */
        rc = hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
        rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
        rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
        rc |= hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
        AssertRCReturn(rc, rc);
        Log4(("#GP Gst: RIP %#RX64 ErrorCode=%#x CR0=%#RX64 CPL=%u\n", pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode,
              pMixedCtx->cr0, CPUMGetGuestCPL(pVCpu)));
        hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
                               pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
        return rc;
#else
        /* We don't intercept #GP. */
        AssertMsgFailed(("Unexpected VM-exit caused by #GP exception\n"));
        return VERR_VMX_UNEXPECTED_EXCEPTION;
#endif
    }

    Assert(CPUMIsGuestInRealModeEx(pMixedCtx));
    Assert(!pVCpu->CTX_SUFF(pVM)->hm.s.vmx.fUnrestrictedGuest);

    /* EMInterpretDisasCurrent() requires a lot of the state, save the entire state. */
    rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    AssertRCReturn(rc, rc);

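    /*
     * Without unrestricted execution the guest runs real-mode code in V86 mode, where
     * IOPL-sensitive instructions (CLI, STI, PUSHF, POPF, IRET, INT, ...) raise #GP.
     * Emulate the common ones inline below and hand anything else to the interpreter.
     */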
    PDISCPUSTATE pDis = &pVCpu->hm.s.DisState;
    uint32_t cbOp = 0;
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    rc = EMInterpretDisasCurrent(pVM, pVCpu, pDis, &cbOp);
    if (RT_SUCCESS(rc))
    {
        rc = VINF_SUCCESS;
        Assert(cbOp == pDis->cbInstr);
        Log4(("#GP Disas OpCode=%u CS:EIP %04x:%#RX64\n", pDis->pCurInstr->uOpcode, pMixedCtx->cs.Sel, pMixedCtx->rip));
        switch (pDis->pCurInstr->uOpcode)
        {
            case OP_CLI:
            {
                pMixedCtx->eflags.Bits.u1IF = 0;
                pMixedCtx->rip += pDis->cbInstr;
                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitCli);
                break;
            }

            case OP_STI:
            {
                pMixedCtx->eflags.Bits.u1IF = 1;
                pMixedCtx->rip += pDis->cbInstr;
                EMSetInhibitInterruptsPC(pVCpu, pMixedCtx->rip);
                Assert(VMCPU_FF_IS_SET(pVCpu, VMCPU_FF_INHIBIT_INTERRUPTS));
                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RFLAGS;
                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitSti);
                break;
            }

            case OP_HLT:
            {
                rc = VINF_EM_HALT;
                pMixedCtx->rip += pDis->cbInstr;
                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP;
                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitHlt);
                break;
            }

            case OP_POPF:
            {
                Log4(("POPF CS:RIP %04x:%#RX64\n", pMixedCtx->cs.Sel, pMixedCtx->rip));
                uint32_t cbParm = 0;
                uint32_t uMask = 0;
                if (pDis->fPrefix & DISPREFIX_OPSIZE)
                {
                    cbParm = 4;
                    uMask = 0xffffffff;
                }
                else
                {
                    cbParm = 2;
                    uMask = 0xffff;
                }

                /* Get the stack pointer & pop the contents of the stack onto Eflags. */
                RTGCPTR GCPtrStack = 0;
                X86EFLAGS Eflags;
                rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
                                  &GCPtrStack);
                if (RT_SUCCESS(rc))
                {
                    Assert(sizeof(Eflags.u32) >= cbParm);
                    Eflags.u32 = 0;
                    rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u32, cbParm);
                }
                if (RT_FAILURE(rc))
                {
                    rc = VERR_EM_INTERPRETER;
                    break;
                }
                Log4(("POPF %#x -> %#RX64 mask=%#x RIP=%#RX64\n", Eflags.u, pMixedCtx->rsp, uMask, pMixedCtx->rip));
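                /* Merge in only the POPF-modifiable flag bits within the operand-size
                   mask; all other EFLAGS bits are preserved. */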
                pMixedCtx->eflags.u32 =   (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
                                        | (Eflags.u32 & X86_EFL_POPF_BITS & uMask);
                /* The RF bit is always cleared by POPF; see Intel Instruction reference for POPF. */
                pMixedCtx->eflags.Bits.u1RF = 0;
                pMixedCtx->esp += cbParm;
                pMixedCtx->esp &= uMask;
                pMixedCtx->rip += pDis->cbInstr;
                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS;
                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPopf);
                break;
            }

            case OP_PUSHF:
            {
                uint32_t cbParm = 0;
                uint32_t uMask = 0;
                if (pDis->fPrefix & DISPREFIX_OPSIZE)
                {
                    cbParm = 4;
                    uMask = 0xffffffff;
                }
                else
                {
                    cbParm = 2;
                    uMask = 0xffff;
                }

                /* Get the stack pointer & push the contents of eflags onto the stack. */
                RTGCPTR GCPtrStack = 0;
                rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), (pMixedCtx->esp - cbParm) & uMask,
                                  SELMTOFLAT_FLAGS_CPL0, &GCPtrStack);
                if (RT_FAILURE(rc))
                {
                    rc = VERR_EM_INTERPRETER;
                    break;
                }
                X86EFLAGS Eflags = pMixedCtx->eflags;
                /* The RF and VM bits are cleared in the image stored on the stack; see Intel Instruction reference for PUSHF. */
                Eflags.Bits.u1RF = 0;
                Eflags.Bits.u1VM = 0;

                rc = PGMPhysWrite(pVM, (RTGCPHYS)GCPtrStack, &Eflags.u, cbParm);
                if (RT_FAILURE(rc))
                {
                    rc = VERR_EM_INTERPRETER;
                    break;
                }
                Log4(("PUSHF %#x -> %#RGv\n", Eflags.u, GCPtrStack));
                pMixedCtx->esp -= cbParm;
                pMixedCtx->esp &= uMask;
                pMixedCtx->rip += pDis->cbInstr;
                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP;
                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitPushf);
                break;
            }

            case OP_IRET:
            {
                /** @todo Handle 32-bit operand sizes and check stack limits. See Intel
                 *        instruction reference. */
                RTGCPTR GCPtrStack = 0;
                uint32_t uMask = 0xffff;
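                /* Real-mode IRET pops three 16-bit words off the stack: IP, CS and FLAGS. */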
                uint16_t aIretFrame[3];
                if (pDis->fPrefix & (DISPREFIX_OPSIZE | DISPREFIX_ADDRSIZE))
                {
                    rc = VERR_EM_INTERPRETER;
                    break;
                }
                rc = SELMToFlatEx(pVCpu, DISSELREG_SS, CPUMCTX2CORE(pMixedCtx), pMixedCtx->esp & uMask, SELMTOFLAT_FLAGS_CPL0,
                                  &GCPtrStack);
                if (RT_SUCCESS(rc))
                    rc = PGMPhysRead(pVM, (RTGCPHYS)GCPtrStack, &aIretFrame[0], sizeof(aIretFrame));
                if (RT_FAILURE(rc))
                {
                    rc = VERR_EM_INTERPRETER;
                    break;
                }
                pMixedCtx->eip = 0;
                pMixedCtx->ip = aIretFrame[0];
                pMixedCtx->cs.Sel = aIretFrame[1];
                pMixedCtx->cs.ValidSel = aIretFrame[1];
                pMixedCtx->cs.u64Base = (uint64_t)pMixedCtx->cs.Sel << 4;
                pMixedCtx->eflags.u32 =   (pMixedCtx->eflags.u32 & ~(X86_EFL_POPF_BITS & uMask))
                                        | (aIretFrame[2] & X86_EFL_POPF_BITS & uMask);
                pMixedCtx->sp += sizeof(aIretFrame);
                pVCpu->hm.s.fContextUseFlags |=   HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_SEGMENT_REGS | HM_CHANGED_GUEST_RSP
                                                | HM_CHANGED_GUEST_RFLAGS;
                Log4(("IRET %#RGv to %04x:%x\n", GCPtrStack, pMixedCtx->cs.Sel, pMixedCtx->ip));
                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitIret);
                break;
            }

            case OP_INT:
            {
                uint16_t uVector = pDis->Param1.uValue & 0xff;
                hmR0VmxSetPendingIntN(pVCpu, pMixedCtx, uVector, pDis->cbInstr);
                STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
                break;
            }

            case OP_INTO:
            {
                if (pMixedCtx->eflags.Bits.u1OF)
                {
                    hmR0VmxSetPendingXcptOF(pVCpu, pMixedCtx, pDis->cbInstr);
                    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitInt);
                }
                break;
            }

            default:
            {
                VBOXSTRICTRC rc2 = EMInterpretInstructionDisasState(pVCpu, pDis, CPUMCTX2CORE(pMixedCtx), 0 /* pvFault */,
                                                                    EMCODETYPE_SUPERVISOR);
                rc = VBOXSTRICTRC_VAL(rc2);
                pVCpu->hm.s.fContextUseFlags |= HM_CHANGED_ALL_GUEST;
                Log4(("#GP rc=%Rrc\n", rc));
                break;
            }
        }
    }
    else
        rc = VERR_EM_INTERPRETER;

    AssertMsg(rc == VINF_SUCCESS || rc == VERR_EM_INTERPRETER || rc == VINF_PGM_CHANGE_MODE || rc == VINF_EM_HALT,
              ("#GP Unexpected rc=%Rrc\n", rc));
    return rc;
}


/**
 * VM-exit exception handler wrapper for generic exceptions. Simply re-injects
 * the exception reported in the VMX transient structure back into the VM.
 *
 * @remarks Requires uExitIntrInfo in the VMX transient structure to be
 *          up-to-date.
 */
static int hmR0VmxExitXcptGeneric(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();

    /* Re-inject the exception into the guest. This cannot be a double-fault condition which would have been handled in
       hmR0VmxCheckExitDueToEventDelivery(). */
    int rc = hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
    rc |= hmR0VmxReadExitInstrLenVmcs(pVCpu, pVmxTransient);
    AssertRCReturn(rc, rc);
    Assert(pVmxTransient->fVmcsFieldsRead & HMVMX_UPDATED_TRANSIENT_EXIT_INTERRUPTION_INFO);

    hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
                           pVmxTransient->cbInstr, pVmxTransient->uExitIntrErrorCode, 0 /* GCPtrFaultAddress */);
    return VINF_SUCCESS;
}


/**
 * VM-exit exception handler for #PF (Page-fault exception).
 */
static int hmR0VmxExitXcptPF(PVMCPU pVCpu, PCPUMCTX pMixedCtx, PVMXTRANSIENT pVmxTransient)
{
    HMVMX_VALIDATE_EXIT_XCPT_HANDLER_PARAMS();
    PVM pVM = pVCpu->CTX_SUFF(pVM);
    int rc = hmR0VmxReadExitQualificationVmcs(pVCpu, pVmxTransient);
    rc |= hmR0VmxReadExitIntrInfoVmcs(pVCpu, pVmxTransient);
    rc |= hmR0VmxReadExitIntrErrorCodeVmcs(pVCpu, pVmxTransient);
    AssertRCReturn(rc, rc);

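    /* With nested paging the guest manages its own page tables and #PF is normally not
       intercepted at all; the block below only exists for the always-trap debug builds. */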
#if defined(HMVMX_ALWAYS_TRAP_ALL_XCPTS) || defined(HMVMX_ALWAYS_TRAP_PF)
    if (pVM->hm.s.fNestedPaging)
    {
        pVCpu->hm.s.Event.fPending = false;                  /* In case it's a contributory or vectoring #PF. */
        if (RT_LIKELY(!pVmxTransient->fVectoringPF))
        {
            pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
            hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
                                   0 /* cbInstr */, pVmxTransient->uExitIntrErrorCode, pVmxTransient->uExitQualification);
        }
        else
        {
            /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
            hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
            Log4(("Pending #DF due to vectoring #PF. NP\n"));
        }
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
        return rc;
    }
#else
    Assert(!pVM->hm.s.fNestedPaging);
#endif

    rc = hmR0VmxSaveGuestState(pVCpu, pMixedCtx);
    AssertRCReturn(rc, rc);

    Log4(("#PF: cr2=%#RX64 cs:rip=%#04x:%#RX64 uErrCode %#RX32 cr3=%#RX64\n", pVmxTransient->uExitQualification,
          pMixedCtx->cs.Sel, pMixedCtx->rip, pVmxTransient->uExitIntrErrorCode, pMixedCtx->cr3));

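    /* Hand the fault to PGM: it either syncs the shadow page tables (we resume the
       guest), reports a genuine guest fault (we reflect it below), or returns a status
       that sends us back to ring-3. */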
    TRPMAssertXcptPF(pVCpu, pVmxTransient->uExitQualification, (RTGCUINT)pVmxTransient->uExitIntrErrorCode);
    rc = PGMTrap0eHandler(pVCpu, pVmxTransient->uExitIntrErrorCode, CPUMCTX2CORE(pMixedCtx),
                          (RTGCPTR)pVmxTransient->uExitQualification);

    Log4(("#PF: rc=%Rrc\n", rc));
    if (rc == VINF_SUCCESS)
    {
        /* Successfully synced shadow page tables or emulated an MMIO instruction. */
        /** @todo this isn't quite right, what if guest does lgdt with some MMIO
         *        memory? We don't update the whole state here... */
        pVCpu->hm.s.fContextUseFlags |=   HM_CHANGED_GUEST_RIP | HM_CHANGED_GUEST_RSP | HM_CHANGED_GUEST_RFLAGS
                                        | HM_CHANGED_VMX_GUEST_APIC_STATE;
        TRPMResetTrap(pVCpu);
        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPF);
        return rc;
    }
    else if (rc == VINF_EM_RAW_GUEST_TRAP)
    {
        if (!pVmxTransient->fVectoringPF)
        {
            /* It's a guest page fault and needs to be reflected to the guest. */
            uint32_t uGstErrorCode = TRPMGetErrorCode(pVCpu);
            TRPMResetTrap(pVCpu);
            pVCpu->hm.s.Event.fPending = false;                 /* In case it's a contributory #PF. */
            pMixedCtx->cr2 = pVmxTransient->uExitQualification; /* Update here in case we go back to ring-3 before injection. */
            hmR0VmxSetPendingEvent(pVCpu, VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(pVmxTransient->uExitIntrInfo),
                                   0 /* cbInstr */, uGstErrorCode, pVmxTransient->uExitQualification);
        }
        else
        {
            /* A guest page-fault occurred during delivery of a page-fault. Inject #DF. */
            TRPMResetTrap(pVCpu);
            pVCpu->hm.s.Event.fPending = false;     /* Clear pending #PF to replace it with #DF. */
            hmR0VmxSetPendingXcptDF(pVCpu, pMixedCtx);
            Log4(("#PF: Pending #DF due to vectoring #PF\n"));
        }

        STAM_COUNTER_INC(&pVCpu->hm.s.StatExitGuestPF);
        return VINF_SUCCESS;
    }

    TRPMResetTrap(pVCpu);
    STAM_COUNTER_INC(&pVCpu->hm.s.StatExitShadowPFEM);
    return rc;
}

/** @} */
